| text (string, lengths 7–1.24M) | id (string, lengths 14–166) | metadata (dict) | __index_level_0__ (int64, 0–519) |
|---|---|---|---|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionXLKDiffusionPipeline
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
dtype = torch.float16
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_xl(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_euler")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=9.0,
num_inference_steps=20,
height=512,
width=512,
output_type="np",
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array(
[0.79600024, 0.796546, 0.80682373, 0.79428387, 0.7905743, 0.8008807, 0.786183, 0.7835959, 0.797892]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_karras_sigmas(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_2m")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=7.5,
num_inference_steps=15,
output_type="np",
use_karras_sigmas=True,
height=512,
width=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array(
[0.9506951, 0.9527786, 0.95309967, 0.9511477, 0.952523, 0.9515326, 0.9511933, 0.9480397, 0.94930184]
)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_noise_sampler_seed(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_sde")
prompt = "A painting of a squirrel eating a burger"
seed = 0
images1 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=20,
output_type="np",
height=512,
width=512,
).images
images2 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=20,
output_type="np",
height=512,
width=512,
).images
assert images1.shape == (1, 512, 512, 3)
assert images2.shape == (1, 512, 512, 3)
assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
|
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_k_diffusion.py/0
|
{
"file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_k_diffusion.py",
"repo_id": "diffusers",
"token_count": 2166
}
| 161
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import gc
import inspect
import io
import re
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel
from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineTesterMixin
enable_full_determinism()
def to_np(tensor):
if isinstance(tensor, torch.Tensor):
tensor = tensor.detach().cpu().numpy()
return tensor
class TextToVideoZeroSDXLPipelineFastTests(PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase):
pipeline_class = TextToVideoZeroSDXLPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
generator_device = "cpu"
def get_dummy_components(self, seed=0):
torch.manual_seed(seed)
unet = UNet2DConditionModel(
block_out_channels=(2, 4),
layers_per_block=2,
sample_size=2,
norm_num_groups=2,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
# SD2-specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
addition_embed_type="text_time",
addition_time_embed_dim=8,
transformer_layers_per_block=(1, 2),
projection_class_embeddings_input_dim=80, # 6 * 8 + 32
cross_attention_dim=64,
)
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_start=0.0001,
beta_end=0.02,
beta_schedule="linear",
trained_betas=None,
clip_sample=True,
set_alpha_to_one=True,
steps_offset=0,
prediction_type="epsilon",
thresholding=False,
dynamic_thresholding_ratio=0.995,
clip_sample_range=1.0,
sample_max_value=1.0,
timestep_spacing="leading",
rescale_betas_zero_snr=False,
)
torch.manual_seed(seed)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(seed)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=32,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_2,
"tokenizer_2": tokenizer_2,
"image_encoder": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A panda dancing in Antarctica",
"generator": generator,
"num_inference_steps": 5,
"t0": 1,
"t1": 3,
"height": 64,
"width": 64,
"video_length": 3,
"output_type": "np",
}
return inputs
def get_generator(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
return generator
def test_text_to_video_zero_sdxl(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
inputs = self.get_dummy_inputs(self.generator_device)
result = pipe(**inputs).images
first_frame_slice = result[0, -3:, -3:, -1]
last_frame_slice = result[-1, -3:, -3:, 0]
expected_slice1 = np.array(
[0.6008109, 0.73051643, 0.51778656, 0.55817354, 0.45222935, 0.45998418, 0.57017255, 0.54874814, 0.47078788]
)
expected_slice2 = np.array(
[0.6011751, 0.47420046, 0.41660714, 0.6472957, 0.41261768, 0.5438129, 0.7401535, 0.6756011, 0.53652245]
)
assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2
assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_attention_slicing_forward_pass(self):
pass
def test_cfg(self):
sig = inspect.signature(self.pipeline_class.__call__)
if "guidance_scale" not in sig.parameters:
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(self.generator_device)
inputs["guidance_scale"] = 1.0
out_no_cfg = pipe(**inputs)[0]
inputs["guidance_scale"] = 7.5
out_cfg = pipe(**inputs)[0]
assert out_cfg.shape == out_no_cfg.shape
def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(self.generator_device))[0]
output_tuple = pipe(**self.get_dummy_inputs(self.generator_device), return_dict=False)[0]
max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
self.assertLess(max_diff, expected_max_difference)
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
def test_float16_inference(self, expected_max_diff=5e-2):
components = self.get_dummy_components()
for name, module in components.items():
if hasattr(module, "half"):
components[name] = module.to(torch_device).half()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
components = self.get_dummy_components()
pipe_fp16 = self.pipeline_class(**components)
pipe_fp16.to(torch_device, torch.float16)
pipe_fp16.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(self.generator_device)
# Reset generator in case it is used inside dummy inputs
if "generator" in inputs:
inputs["generator"] = self.get_generator(self.generator_device)
output = pipe(**inputs)[0]
fp16_inputs = self.get_dummy_inputs(self.generator_device)
# Reset generator in case it is used inside dummy inputs
if "generator" in fp16_inputs:
fp16_inputs["generator"] = self.get_generator(self.generator_device)
output_fp16 = pipe_fp16(**fp16_inputs)[0]
max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def test_inference_batch_consistent(self):
pass
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_inference_batch_single_identical(self):
pass
@unittest.skipIf(
torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
)
def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(self.generator_device)
output_without_offload = pipe(**inputs)[0]
pipe.enable_model_cpu_offload()
inputs = self.get_dummy_inputs(self.generator_device)
output_with_offload = pipe(**inputs)[0]
max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def test_pipeline_call_signature(self):
pass
def test_progress_bar(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
inputs = self.get_dummy_inputs(self.generator_device)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
def test_save_load_float16(self, expected_max_diff=1e-2):
components = self.get_dummy_components()
for name, module in components.items():
if hasattr(module, "half"):
components[name] = module.to(torch_device).half()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(self.generator_device)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)
pipe_loaded.to(torch_device)
pipe_loaded.set_progress_bar_config(disable=None)
for name, component in pipe_loaded.components.items():
if hasattr(component, "dtype"):
self.assertTrue(
component.dtype == torch.float16,
f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.",
)
inputs = self.get_dummy_inputs(self.generator_device)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
self.assertLess(
max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading."
)
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_save_load_local(self):
pass
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_save_load_optional_components(self):
pass
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_sequential_cpu_offload_forward_pass(self):
pass
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
def test_to_device(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to("cpu")
model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
self.assertTrue(all(device == "cpu" for device in model_devices))
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu
self.assertTrue(np.isnan(output_cpu).sum() == 0)
pipe.to("cuda")
model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
self.assertTrue(all(device == "cuda" for device in model_devices))
output_cuda = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
@unittest.skip(
reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
)
def test_xformers_attention_forwardGenerator_pass(self):
pass
@nightly
@require_torch_gpu
class TextToVideoZeroSDXLPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_full_model(self):
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "A panda dancing in Antarctica"
result = pipe(prompt=prompt, generator=generator).images
first_frame_slice = result[0, -3:, -3:, -1]
last_frame_slice = result[-1, -3:, -3:, 0]
expected_slice1 = np.array([0.57, 0.57, 0.57, 0.57, 0.57, 0.56, 0.55, 0.56, 0.56])
expected_slice2 = np.array([0.54, 0.53, 0.53, 0.53, 0.53, 0.52, 0.53, 0.53, 0.53])
assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2
assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2
|
diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py/0
|
{
"file_path": "diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py",
"repo_id": "diffusers",
"token_count": 7435
}
| 162
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDPMScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_variance_type(self):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_prediction_type(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_time_indices(self):
for t in [0, 500, 999]:
self.check_over_forward(time_step=t)
def test_variance(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
for t in reversed(range(num_trained_timesteps)):
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps)
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps):
if i == len(timesteps) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep)
prev_t = prev_t.item()
self.assertEqual(prev_t, expected_prev_t)
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
):
scheduler.set_timesteps(timesteps=timesteps)
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_trained_timesteps = len(scheduler)
t_start = num_trained_timesteps - 2
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0)
# add noise
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
# 1. predict noise residual
residual = model(sample, t)
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 387.9466) < 1e-2, f"expected result sum 387.9466, but got {result_sum}"
assert abs(result_mean.item() - 0.5051) < 1e-3, f"expected result mean 0.5051, but got {result_mean}"
|
diffusers/tests/schedulers/test_scheduler_ddpm.py/0
|
{
"file_path": "diffusers/tests/schedulers/test_scheduler_ddpm.py",
"repo_id": "diffusers",
"token_count": 3860
}
| 163
|
import tempfile
from typing import Dict, List, Tuple
import torch
from diffusers import LCMScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class LCMSchedulerTest(SchedulerCommonTest):
scheduler_classes = (LCMScheduler,)
forward_default_kwargs = (("num_inference_steps", 10),)
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.00085,
"beta_end": 0.0120,
"beta_schedule": "scaled_linear",
"prediction_type": "epsilon",
}
config.update(**kwargs)
return config
@property
def default_valid_timestep(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep = scheduler.timesteps[-1]
return timestep
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
# 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is
self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]:
self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample)
def test_thresholding(self):
self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
time_step=self.default_valid_timestep,
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def test_time_indices(self):
# Get default timestep schedule.
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
for t in timesteps:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
# Hardcoded for now
for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
# Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work
# for LCMScheduler under default settings
def test_add_noise_device(self, num_inference_steps=10):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
sample = self.dummy_sample.to(torch_device)
scaled_sample = scheduler.scale_model_input(sample, 0.0)
self.assertEqual(sample.shape, scaled_sample.shape)
noise = torch.randn_like(scaled_sample).to(torch_device)
t = scheduler.timesteps[5][None]
noised = scheduler.add_noise(scaled_sample, noise, t)
self.assertEqual(noised.shape, scaled_sample.shape)
# Override test_from_save_pretrained because it hardcodes a timestep of 1
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
timestep = self.default_valid_timestep
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
# Override test_step_shape because uses 0 and 1 as hardcoded timesteps
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
scheduler.set_timesteps(num_inference_steps)
timestep_0 = scheduler.timesteps[-2]
timestep_1 = scheduler.timesteps[-1]
output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
# Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", 50)
timestep = self.default_valid_timestep
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)
scheduler.set_timesteps(num_inference_steps)
kwargs["generator"] = torch.manual_seed(0)
outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple, outputs_dict)
def full_loop(self, num_inference_steps=10, seed=0, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(seed)
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, generator).prev_sample
return sample
def test_full_loop_onestep(self):
sample = self.full_loop(num_inference_steps=1)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
# TODO: get expected sum and mean
assert abs(result_sum.item() - 18.7097) < 1e-3
assert abs(result_mean.item() - 0.0244) < 1e-3
def test_full_loop_multistep(self):
sample = self.full_loop(num_inference_steps=10)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
# TODO: get expected sum and mean
assert abs(result_sum.item() - 197.7616) < 1e-3
assert abs(result_mean.item() - 0.2575) < 1e-3
def test_custom_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps)
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps):
if i == len(timesteps) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep)
prev_t = prev_t.item()
self.assertEqual(prev_t, expected_prev_t)
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
):
scheduler.set_timesteps(timesteps=timesteps)
|
diffusers/tests/schedulers/test_scheduler_lcm.py/0
|
{
"file_path": "diffusers/tests/schedulers/test_scheduler_lcm.py",
"repo_id": "diffusers",
"token_count": 5668
}
| 164
|
import gc
import tempfile
import unittest
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from .single_file_testing_utils import (
SDSingleFileTesterMixin,
download_diffusers_config,
download_original_config,
download_single_file_checkpoint,
)
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionControlNetPipeline
ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "runwayml/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
control_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
prompt = "bird"
inputs = {
"prompt": prompt,
"image": init_image,
"control_image": control_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
pipe.unet.set_default_attn_processor()
pipe.enable_model_cpu_offload()
pipe_sf = self.pipeline_class.from_single_file(
self.ckpt_path,
controlnet=controlnet,
)
pipe_sf.unet.set_default_attn_processor()
pipe_sf.enable_model_cpu_offload()
inputs = self.get_inputs(torch_device)
output = pipe(**inputs).images[0]
inputs = self.get_inputs(torch_device)
output_sf = pipe_sf(**inputs).images[0]
max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten())
assert max_diff < 1e-3
def test_single_file_components(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipe = self.pipeline_class.from_pretrained(
self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet
)
pipe_single_file = self.pipeline_class.from_single_file(
self.ckpt_path,
safety_checker=None,
controlnet=controlnet,
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_components_local_files_only(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
with tempfile.TemporaryDirectory() as tmpdir:
ckpt_filename = self.ckpt_path.split("/")[-1]
local_ckpt_path = download_single_file_checkpoint(self.repo_id, ckpt_filename, tmpdir)
pipe_single_file = self.pipeline_class.from_single_file(
local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_components_with_original_config(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16")
pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
pipe_single_file = self.pipeline_class.from_single_file(
self.ckpt_path, controlnet=controlnet, safety_checker=None, original_config=self.original_config
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_components_with_original_config_local_files_only(self):
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16"
)
pipe = self.pipeline_class.from_pretrained(
self.repo_id,
controlnet=controlnet,
)
with tempfile.TemporaryDirectory() as tmpdir:
ckpt_filename = self.ckpt_path.split("/")[-1]
local_ckpt_path = download_single_file_checkpoint(self.repo_id, ckpt_filename, tmpdir)
local_original_config = download_original_config(self.original_config, tmpdir)
pipe_single_file = self.pipeline_class.from_single_file(
local_ckpt_path,
original_config=local_original_config,
controlnet=controlnet,
safety_checker=None,
local_files_only=True,
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_components_with_diffusers_config(self):
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16")
pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet)
pipe_single_file = self.pipeline_class.from_single_file(
self.ckpt_path, controlnet=controlnet, safety_checker=None, original_config=self.original_config
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_components_with_diffusers_config_local_files_only(self):
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16"
)
pipe = self.pipeline_class.from_pretrained(
self.repo_id,
controlnet=controlnet,
)
with tempfile.TemporaryDirectory() as tmpdir:
ckpt_filename = self.ckpt_path.split("/")[-1]
local_ckpt_path = download_single_file_checkpoint(self.repo_id, ckpt_filename, tmpdir)
local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir)
pipe_single_file = self.pipeline_class.from_single_file(
local_ckpt_path,
config=local_diffusers_config,
safety_checker=None,
controlnet=controlnet,
local_files_only=True,
)
super()._compare_component_configs(pipe, pipe_single_file)
def test_single_file_setting_pipeline_dtype_to_fp16(self):
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16"
)
single_file_pipe = self.pipeline_class.from_single_file(
self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe)
|
diffusers/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py/0
|
{
"file_path": "diffusers/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py",
"repo_id": "diffusers",
"token_count": 3508
}
| 165
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
"""Find one (or multiple) backend in a code line of the init."""
if _re_test_backend.search(line) is None:
return None
backends = [b[0] for b in _re_backend.findall(line)]
backends.sort()
return "_and_".join(backends)
def parse_init(init_file):
"""
Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
defined
"""
with open(init_file, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
line_index = 0
while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lines):
return None
# First grab the objects without a specific backend in _import_structure
objects = []
while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
line = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(line):
content = _re_one_line_import_struct.search(line).groups()[0]
imports = re.findall(r"\[([^\]]+)\]", content)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", ")])
line_index += 1
continue
single_line_import_search = _re_import_struct_key_value.search(line)
if single_line_import_search is not None:
imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
objects.extend(imports)
elif line.startswith(" " * 8 + '"'):
objects.append(line[9:-3])
line_index += 1
import_dict_objects = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING"):
# If the line is an if not is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
backend = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
line = lines[line_index]
if _re_import_struct_add_one.search(line) is not None:
objects.append(_re_import_struct_add_one.search(line).groups()[0])
elif _re_import_struct_add_many.search(line) is not None:
imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
imports = [obj[1:-1] for obj in imports if len(obj) > 0]
objects.extend(imports)
elif _re_between_brackets.search(line) is not None:
imports = _re_between_brackets.search(line).groups()[0].split(", ")
imports = [obj[1:-1] for obj in imports if len(obj) > 0]
objects.extend(imports)
elif _re_quote_object.search(line) is not None:
objects.append(_re_quote_object.search(line).groups()[0])
elif line.startswith(" " * 8 + '"'):
objects.append(line[9:-3])
elif line.startswith(" " * 12 + '"'):
objects.append(line[13:-3])
line_index += 1
import_dict_objects[backend] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
objects = []
while (
line_index < len(lines)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("else")
):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 8):
objects.append(line[8:-2])
line_index += 1
type_hint_objects = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(lines):
# If the line is an if is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
backend = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
line = lines[line_index]
single_line_import_search = _re_import.search(line)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", "))
elif line.startswith(" " * 12):
objects.append(line[12:-2])
line_index += 1
type_hint_objects[backend] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
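# Illustrative sketch, not part of the original utility: for a stripped-down __init__.py
# whose `_import_structure` registers "ConfigMixin" with no backend and "UNet2DModel"
# behind an `if not is_torch_available()` guard (inside the usual try/except/else block),
# and whose TYPE_CHECKING section mirrors those same imports, `parse_init` would return
# roughly
#
#     ({"none": ["ConfigMixin"], "torch": ["UNet2DModel"]},
#      {"none": ["ConfigMixin"], "torch": ["UNet2DModel"]})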
def analyze_results(import_dict_objects, type_hint_objects):
"""
Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
"""
def find_duplicates(seq):
return [k for k, v in collections.Counter(seq).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
duplicate_type_hints = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
name = "base imports" if key == "none" else f"{key} backend"
errors.append(f"Differences for {name}:")
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
return errors
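# Illustrative sketch, not part of the original utility: with hypothetical inputs such as
#
#     import_dict_objects = {"none": ["ConfigMixin"], "torch": ["UNet2DModel"]}
#     type_hint_objects = {"none": ["ConfigMixin"], "torch": []}
#
# `analyze_results` would report something like
#
#     ["Differences for torch backend:",
#      "  UNet2DModel in _import_structure but not in TYPE_HINT."]
#
# while two identical dictionaries yield an empty list of errors.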
def check_all_inits():
"""
Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
both halves.
"""
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
if "__init__.py" in files:
fname = os.path.join(root, "__init__.py")
objects = parse_init(fname)
if objects is not None:
errors = analyze_results(*objects)
if len(errors) > 0:
errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(errors))
if len(failures) > 0:
raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
"""
Returns the list of Transformers submodules.
"""
submodules = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
for folder in directories:
# Ignore private modules
if folder.startswith("_"):
directories.remove(folder)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path) / folder).glob("*.py"))) == 0:
continue
short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
submodule = short_path.replace(os.path.sep, ".")
submodules.append(submodule)
for fname in files:
if fname == "__init__.py":
continue
short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
if len(submodule.split(".")) == 1:
submodules.append(submodule)
return submodules
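# Illustrative sketch, not part of the original utility: with the usual repository layout,
# a package such as src/transformers/models/bert yields the submodule string "models.bert",
# while a top-level file such as src/transformers/configuration_utils.py yields
# "configuration_utils"; deeper files like models/bert/modeling_bert.py are covered by
# their parent package and are not added individually.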
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules():
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(module_not_registered) > 0:
list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
)
if __name__ == "__main__":
check_all_inits()
check_submodules()
|
diffusers/utils/check_inits.py/0
|
{
"file_path": "diffusers/utils/check_inits.py",
"repo_id": "diffusers",
"token_count": 5410
}
| 166
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py
```
Script modified from:
https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
"""
import argparse
import os
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from diffusers.pipelines.auto_pipeline import (
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
PIPELINE_TAG_JSON = "pipeline_tags.json"
def get_supported_pipeline_table() -> dict:
"""
Generates a dictionary containing the supported auto classes for each pipeline type,
using the content of the auto modules.
"""
# All supported pipelines for automatic mapping.
all_supported_pipeline_classes = [
(class_name.__name__, "text-to-image", "AutoPipelineForText2Image")
for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForImage2Image")
for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForInpainting")
for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes = list(set(all_supported_pipeline_classes))
all_supported_pipeline_classes.sort(key=lambda x: x[0])
data = {}
data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes]
data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes]
data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes]
return data
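# Illustrative sketch, not part of the original script: the returned dictionary is
# column-oriented, e.g. roughly
#
#     {
#         "pipeline_class": ["StableDiffusionPipeline", ...],
#         "pipeline_tag": ["text-to-image", ...],
#         "auto_class": ["AutoPipelineForText2Image", ...],
#     }
#
# which `update_metadata` below converts into a pandas DataFrame and a `datasets.Dataset`.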
def update_metadata(commit_sha: str):
"""
Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`.
Args:
commit_sha (`str`): The commit SHA on Diffusers corresponding to this update.
"""
pipelines_table = get_supported_pipeline_table()
pipelines_table = pd.DataFrame(pipelines_table)
pipelines_dataset = Dataset.from_pandas(pipelines_table)
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/diffusers-metadata",
filename=PIPELINE_TAG_JSON,
repo_type="dataset",
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON))
with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f:
pipeline_tags_json = f.read()
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if hub_pipeline_tags_equal:
print("No updates, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/diffusers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/diffusers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.commit_sha)
|
diffusers/utils/update_metadata.py/0
|
{
"file_path": "diffusers/utils/update_metadata.py",
"repo_id": "diffusers",
"token_count": 1709
}
| 167
|
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04
# Configure image
ARG PYTHON_VERSION=3.10
ARG DEBIAN_FRONTEND=noninteractive
# Install apt dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential cmake \
git git-lfs openssh-client \
nano vim less util-linux tree \
htop atop nvtop \
sed gawk grep curl wget zip unzip \
tcpdump sysstat screen tmux \
libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
speech-dispatcher \
python${PYTHON_VERSION} python${PYTHON_VERSION}-venv \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Install ffmpeg build dependencies. See:
# https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
# TODO(aliberts): create image to build dependencies from source instead
RUN apt-get update && apt-get install -y --no-install-recommends \
autoconf automake yasm \
libass-dev \
libfreetype6-dev \
libgnutls28-dev \
libunistring-dev \
libmp3lame-dev \
libtool \
libvorbis-dev \
meson \
ninja-build \
pkg-config \
texinfo \
yasm \
zlib1g-dev \
nasm \
libx264-dev \
libx265-dev libnuma-dev \
libvpx-dev \
libfdk-aac-dev \
libopus-dev \
libsvtav1-dev libsvtav1enc-dev libsvtav1dec-dev \
libdav1d-dev
# Install gh cli tool
RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
&& mkdir -p -m 755 /etc/apt/keyrings \
&& wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
&& chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& apt update \
&& apt install gh -y \
&& apt clean && rm -rf /var/lib/apt/lists/*
# Setup `python`
RUN ln -s /usr/bin/python3 /usr/bin/python
# Install poetry
RUN curl -sSL https://install.python-poetry.org | python -
ENV PATH="/root/.local/bin:$PATH"
RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
RUN poetry config virtualenvs.create false
RUN poetry config virtualenvs.in-project true
# Set EGL as the rendering backend for MuJoCo
ENV MUJOCO_GL="egl"
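# Hypothetical usage sketch (not part of the original Dockerfile): build and enter the
# dev image from the repository root; the path and tag below are assumptions.
#   docker build -t lerobot-gpu-dev -f docker/lerobot-gpu-dev/Dockerfile .
#   docker run --rm -it --gpus all lerobot-gpu-dev bash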
|
lerobot/docker/lerobot-gpu-dev/Dockerfile/0
|
{
"file_path": "lerobot/docker/lerobot-gpu-dev/Dockerfile",
"repo_id": "lerobot",
"token_count": 983
}
| 168
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from pathlib import Path
from typing import Callable
import datasets
import torch
import torch.utils
from lerobot.common.datasets.compute_stats import aggregate_stats
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
load_episode_data_index,
load_hf_dataset,
load_info,
load_previous_and_future_frames,
load_stats,
load_videos,
reset_episode_index,
)
from lerobot.common.datasets.video_utils import VideoFrame, load_from_videos
# For maintainers, see lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
CODEBASE_VERSION = "v1.6"
DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None
class LeRobotDataset(torch.utils.data.Dataset):
def __init__(
self,
repo_id: str,
root: Path | None = DATA_DIR,
split: str = "train",
image_transforms: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
video_backend: str | None = None,
):
super().__init__()
self.repo_id = repo_id
self.root = root
self.split = split
self.image_transforms = image_transforms
self.delta_timestamps = delta_timestamps
# load data from hub or locally when root is provided
# TODO(rcadene, aliberts): implement faster transfer
# https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads
self.hf_dataset = load_hf_dataset(repo_id, CODEBASE_VERSION, root, split)
if split == "train":
self.episode_data_index = load_episode_data_index(repo_id, CODEBASE_VERSION, root)
else:
self.episode_data_index = calculate_episode_data_index(self.hf_dataset)
self.hf_dataset = reset_episode_index(self.hf_dataset)
self.stats = load_stats(repo_id, CODEBASE_VERSION, root)
self.info = load_info(repo_id, CODEBASE_VERSION, root)
if self.video:
self.videos_dir = load_videos(repo_id, CODEBASE_VERSION, root)
self.video_backend = video_backend if video_backend is not None else "pyav"
@property
def fps(self) -> int:
"""Frames per second used during data collection."""
return self.info["fps"]
@property
def video(self) -> bool:
"""Returns True if this dataset loads video frames from mp4 files.
Returns False if it only loads images from png files.
"""
return self.info.get("video", False)
@property
def features(self) -> datasets.Features:
return self.hf_dataset.features
@property
def camera_keys(self) -> list[str]:
"""Keys to access image and video stream from cameras."""
keys = []
for key, feats in self.hf_dataset.features.items():
if isinstance(feats, (datasets.Image, VideoFrame)):
keys.append(key)
return keys
@property
def video_frame_keys(self) -> list[str]:
"""Keys to access video frames that requires to be decoded into images.
Note: It is empty if the dataset contains images only,
or equal to `self.cameras` if the dataset contains videos only,
or can even be a subset of `self.cameras` in a case of a mixed image/video dataset.
"""
video_frame_keys = []
for key, feats in self.hf_dataset.features.items():
if isinstance(feats, VideoFrame):
video_frame_keys.append(key)
return video_frame_keys
@property
def num_samples(self) -> int:
"""Number of samples/frames."""
return len(self.hf_dataset)
@property
def num_episodes(self) -> int:
"""Number of episodes."""
return len(self.hf_dataset.unique("episode_index"))
@property
def tolerance_s(self) -> float:
"""Tolerance in seconds used to discard loaded frames when their timestamps
        are not close enough to the requested frames. It is only used when `delta_timestamps`
is provided or when loading video frames from mp4 files.
"""
# 1e-4 to account for possible numerical error
return 1 / self.fps - 1e-4
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
item = self.hf_dataset[idx]
if self.delta_timestamps is not None:
item = load_previous_and_future_frames(
item,
self.hf_dataset,
self.episode_data_index,
self.delta_timestamps,
self.tolerance_s,
)
if self.video:
item = load_from_videos(
item,
self.video_frame_keys,
self.videos_dir,
self.tolerance_s,
self.video_backend,
)
if self.image_transforms is not None:
for cam in self.camera_keys:
item[cam] = self.image_transforms(item[cam])
return item
def __repr__(self):
return (
f"{self.__class__.__name__}(\n"
f" Repository ID: '{self.repo_id}',\n"
f" Split: '{self.split}',\n"
f" Number of Samples: {self.num_samples},\n"
f" Number of Episodes: {self.num_episodes},\n"
f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
f" Recorded Frames per Second: {self.fps},\n"
f" Camera Keys: {self.camera_keys},\n"
f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
f" Transformations: {self.image_transforms},\n"
f" Codebase Version: {self.info.get('codebase_version', '< v1.6')},\n"
f")"
)
@classmethod
def from_preloaded(
cls,
repo_id: str = "from_preloaded",
root: Path | None = None,
split: str = "train",
transform: callable = None,
        delta_timestamps: dict[str, list[float]] | None = None,
# additional preloaded attributes
hf_dataset=None,
episode_data_index=None,
stats=None,
info=None,
videos_dir=None,
video_backend=None,
) -> "LeRobotDataset":
"""Create a LeRobot Dataset from existing data and attributes instead of loading from the filesystem.
It is especially useful when converting raw data into LeRobotDataset before saving the dataset
on the filesystem or uploading to the hub.
        Note: Meta-data attributes like `repo_id`, `version`, `root`, etc. are optional and potentially
        meaningless depending on the downstream usage of the returned dataset.
"""
# create an empty object of type LeRobotDataset
obj = cls.__new__(cls)
obj.repo_id = repo_id
obj.root = root
obj.split = split
obj.image_transforms = transform
obj.delta_timestamps = delta_timestamps
obj.hf_dataset = hf_dataset
obj.episode_data_index = episode_data_index
obj.stats = stats
obj.info = info if info is not None else {}
obj.videos_dir = videos_dir
obj.video_backend = video_backend if video_backend is not None else "pyav"
return obj
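# Hypothetical usage sketch (not part of the original file): a LeRobotDataset behaves like a
# regular PyTorch dataset, so it can be wrapped in a DataLoader. The repo_id and the
# `delta_timestamps` values below are illustrative only.
#   dataset = LeRobotDataset("lerobot/pusht", delta_timestamps={"action": [0.0, 0.1]})
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   batch = next(iter(loader))  # dict of tensors keyed by feature name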
class MultiLeRobotDataset(torch.utils.data.Dataset):
"""A dataset consisting of multiple underlying `LeRobotDataset`s.
The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API
structure of `LeRobotDataset`.
"""
def __init__(
self,
repo_ids: list[str],
root: Path | None = DATA_DIR,
split: str = "train",
image_transforms: Callable | None = None,
        delta_timestamps: dict[str, list[float]] | None = None,
video_backend: str | None = None,
):
super().__init__()
self.repo_ids = repo_ids
# Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which
# are handled by this class.
self._datasets = [
LeRobotDataset(
repo_id,
root=root,
split=split,
delta_timestamps=delta_timestamps,
image_transforms=image_transforms,
video_backend=video_backend,
)
for repo_id in repo_ids
]
# Check that some properties are consistent across datasets. Note: We may relax some of these
# consistency requirements in future iterations of this class.
for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True):
if dataset.info != self._datasets[0].info:
raise ValueError(
f"Detected a mismatch in dataset info between {self.repo_ids[0]} and {repo_id}. This is "
"not yet supported."
)
# Disable any data keys that are not common across all of the datasets. Note: we may relax this
# restriction in future iterations of this class. For now, this is necessary at least for being able
# to use PyTorch's default DataLoader collate function.
self.disabled_data_keys = set()
intersection_data_keys = set(self._datasets[0].hf_dataset.features)
for dataset in self._datasets:
intersection_data_keys.intersection_update(dataset.hf_dataset.features)
if len(intersection_data_keys) == 0:
raise RuntimeError(
"Multiple datasets were provided but they had no keys common to all of them. The "
"multi-dataset functionality currently only keeps common keys."
)
for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True):
extra_keys = set(dataset.hf_dataset.features).difference(intersection_data_keys)
logging.warning(
f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the "
"other datasets."
)
self.disabled_data_keys.update(extra_keys)
self.root = root
self.split = split
self.image_transforms = image_transforms
self.delta_timestamps = delta_timestamps
self.stats = aggregate_stats(self._datasets)
@property
def repo_id_to_index(self):
"""Return a mapping from dataset repo_id to a dataset index automatically created by this class.
This index is incorporated as a data key in the dictionary returned by `__getitem__`.
"""
return {repo_id: i for i, repo_id in enumerate(self.repo_ids)}
@property
def repo_index_to_id(self):
"""Return the inverse mapping if repo_id_to_index."""
return {v: k for k, v in self.repo_id_to_index}
@property
def fps(self) -> int:
"""Frames per second used during data collection.
        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
"""
return self._datasets[0].info["fps"]
@property
def video(self) -> bool:
"""Returns True if this dataset loads video frames from mp4 files.
Returns False if it only loads images from png files.
        NOTE: For now, this relies on a check in __init__ to make sure all sub-datasets have the same info.
"""
return self._datasets[0].info.get("video", False)
@property
def features(self) -> datasets.Features:
features = {}
for dataset in self._datasets:
features.update({k: v for k, v in dataset.features.items() if k not in self.disabled_data_keys})
return features
@property
def camera_keys(self) -> list[str]:
"""Keys to access image and video stream from cameras."""
keys = []
for key, feats in self.features.items():
if isinstance(feats, (datasets.Image, VideoFrame)):
keys.append(key)
return keys
@property
def video_frame_keys(self) -> list[str]:
"""Keys to access video frames that requires to be decoded into images.
Note: It is empty if the dataset contains images only,
or equal to `self.cameras` if the dataset contains videos only,
or can even be a subset of `self.cameras` in a case of a mixed image/video dataset.
"""
video_frame_keys = []
for key, feats in self.features.items():
if isinstance(feats, VideoFrame):
video_frame_keys.append(key)
return video_frame_keys
@property
def num_samples(self) -> int:
"""Number of samples/frames."""
return sum(d.num_samples for d in self._datasets)
@property
def num_episodes(self) -> int:
"""Number of episodes."""
return sum(d.num_episodes for d in self._datasets)
@property
def tolerance_s(self) -> float:
"""Tolerance in seconds used to discard loaded frames when their timestamps
        are not close enough to the requested frames. It is only used when `delta_timestamps`
is provided or when loading video frames from mp4 files.
"""
# 1e-4 to account for possible numerical error
return 1 / self.fps - 1e-4
def __len__(self):
return self.num_samples
def __getitem__(self, idx: int) -> dict[str, torch.Tensor]:
if idx >= len(self):
raise IndexError(f"Index {idx} out of bounds.")
# Determine which dataset to get an item from based on the index.
start_idx = 0
dataset_idx = 0
for dataset in self._datasets:
if idx >= start_idx + dataset.num_samples:
start_idx += dataset.num_samples
dataset_idx += 1
continue
break
else:
raise AssertionError("We expect the loop to break out as long as the index is within bounds.")
item = self._datasets[dataset_idx][idx - start_idx]
item["dataset_index"] = torch.tensor(dataset_idx)
for data_key in self.disabled_data_keys:
if data_key in item:
del item[data_key]
return item
def __repr__(self):
return (
f"{self.__class__.__name__}(\n"
f" Repository IDs: '{self.repo_ids}',\n"
f" Split: '{self.split}',\n"
f" Number of Samples: {self.num_samples},\n"
f" Number of Episodes: {self.num_episodes},\n"
f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n"
f" Recorded Frames per Second: {self.fps},\n"
f" Camera Keys: {self.camera_keys},\n"
f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n"
f" Transformations: {self.image_transforms},\n"
f")"
)
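# Hypothetical usage sketch (not part of the original file): MultiLeRobotDataset concatenates
# several datasets whose `info` must match, and tags each item with a "dataset_index".
# The repo ids below are illustrative only.
#   multi = MultiLeRobotDataset(["lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_insertion_scripted"])
#   item = multi[0]
#   source_repo_id = multi.repo_index_to_id[item["dataset_index"].item()]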
|
lerobot/lerobot/common/datasets/lerobot_dataset.py/0
|
{
"file_path": "lerobot/lerobot/common/datasets/lerobot_dataset.py",
"repo_id": "lerobot",
"token_count": 6675
}
| 169
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains utilities to process the raw HDF5 data format used in: https://github.com/tonyzhaozh/act
"""
import gc
import shutil
from pathlib import Path
import h5py
import numpy as np
import torch
import tqdm
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
def get_cameras(hdf5_data):
# ignore depth channel, not currently handled
# TODO(rcadene): add depth
rgb_cameras = [key for key in hdf5_data["/observations/images"].keys() if "depth" not in key] # noqa: SIM118
return rgb_cameras
def check_format(raw_dir) -> None:
# only frames from simulation are uncompressed
compressed_images = "sim" not in raw_dir.name
hdf5_paths = list(raw_dir.glob("episode_*.hdf5"))
assert len(hdf5_paths) != 0
for hdf5_path in hdf5_paths:
with h5py.File(hdf5_path, "r") as data:
assert "/action" in data
assert "/observations/qpos" in data
assert data["/action"].ndim == 2
assert data["/observations/qpos"].ndim == 2
num_frames = data["/action"].shape[0]
assert num_frames == data["/observations/qpos"].shape[0]
for camera in get_cameras(data):
assert num_frames == data[f"/observations/images/{camera}"].shape[0]
if compressed_images:
assert data[f"/observations/images/{camera}"].ndim == 2
else:
assert data[f"/observations/images/{camera}"].ndim == 4
b, h, w, c = data[f"/observations/images/{camera}"].shape
assert c < h and c < w, f"Expect (h,w,c) image format but ({h=},{w=},{c=}) provided."
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# only frames from simulation are uncompressed
compressed_images = "sim" not in raw_dir.name
hdf5_files = sorted(raw_dir.glob("episode_*.hdf5"))
num_episodes = len(hdf5_files)
ep_dicts = []
ep_ids = episodes if episodes else range(num_episodes)
for ep_idx in tqdm.tqdm(ep_ids):
ep_path = hdf5_files[ep_idx]
with h5py.File(ep_path, "r") as ep:
num_frames = ep["/action"].shape[0]
# last step of demonstration is considered done
done = torch.zeros(num_frames, dtype=torch.bool)
done[-1] = True
state = torch.from_numpy(ep["/observations/qpos"][:])
action = torch.from_numpy(ep["/action"][:])
if "/observations/qvel" in ep:
velocity = torch.from_numpy(ep["/observations/qvel"][:])
if "/observations/effort" in ep:
effort = torch.from_numpy(ep["/observations/effort"][:])
ep_dict = {}
for camera in get_cameras(ep):
img_key = f"observation.images.{camera}"
if compressed_images:
import cv2
# load one compressed image after the other in RAM and uncompress
imgs_array = []
for data in ep[f"/observations/images/{camera}"]:
imgs_array.append(cv2.imdecode(data, 1))
imgs_array = np.array(imgs_array)
else:
# load all images in RAM
imgs_array = ep[f"/observations/images/{camera}"][:]
if video:
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict["observation.state"] = state
if "/observations/velocity" in ep:
ep_dict["observation.velocity"] = velocity
if "/observations/effort" in ep:
ep_dict["observation.effort"] = effort
ep_dict["action"] = action
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["next.done"] = done
# TODO(rcadene): add reward and success by computing them in sim
assert isinstance(ep_idx, int)
ep_dicts.append(ep_dict)
gc.collect()
data_dict = concatenate_episodes(ep_dicts)
total_frames = data_dict["frame_index"].shape[0]
data_dict["index"] = torch.arange(0, total_frames, 1)
return data_dict
def to_hf_dataset(data_dict, video) -> Dataset:
features = {}
keys = [key for key in data_dict if "observation.images." in key]
for key in keys:
if video:
features[key] = VideoFrame()
else:
features[key] = Image()
features["observation.state"] = Sequence(
length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
)
if "observation.velocity" in data_dict:
features["observation.velocity"] = Sequence(
length=data_dict["observation.velocity"].shape[1], feature=Value(dtype="float32", id=None)
)
if "observation.effort" in data_dict:
features["observation.effort"] = Sequence(
length=data_dict["observation.effort"].shape[1], feature=Value(dtype="float32", id=None)
)
features["action"] = Sequence(
length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
features["next.done"] = Value(dtype="bool", id=None)
features["index"] = Value(dtype="int64", id=None)
hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
hf_dataset.set_transform(hf_transform_to_torch)
return hf_dataset
def from_raw_to_lerobot_format(
raw_dir: Path,
videos_dir: Path,
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
if fps is None:
fps = 50
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
"codebase_version": CODEBASE_VERSION,
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info
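# Hypothetical usage sketch (not part of the original file): convert a directory of
# episode_*.hdf5 files into the LeRobot format; the paths below are illustrative only.
#   hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
#       raw_dir=Path("data/aloha_sim_insertion_scripted_raw"),
#       videos_dir=Path("data/aloha_sim_insertion_scripted/videos"),
#       fps=50,
#       video=True,
#   )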
|
lerobot/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py/0
|
{
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py",
"repo_id": "lerobot",
"token_count": 3851
}
| 170
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import gymnasium as gym
from omegaconf import DictConfig
def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None:
"""Makes a gym vector environment according to the evaluation config.
n_envs can be used to override eval.batch_size in the configuration. Must be at least 1.
"""
if n_envs is not None and n_envs < 1:
raise ValueError("`n_envs must be at least 1")
if cfg.env.name == "real_world":
return
package_name = f"gym_{cfg.env.name}"
try:
importlib.import_module(package_name)
except ModuleNotFoundError as e:
print(
f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.env.name}]'`"
)
raise e
gym_handle = f"{package_name}/{cfg.env.task}"
gym_kwgs = dict(cfg.env.get("gym", {}))
if cfg.env.get("episode_length"):
gym_kwgs["max_episode_steps"] = cfg.env.episode_length
# batched version of the env that returns an observation of shape (b, c)
env_cls = gym.vector.AsyncVectorEnv if cfg.eval.use_async_envs else gym.vector.SyncVectorEnv
env = env_cls(
[
lambda: gym.make(gym_handle, disable_env_checker=True, **gym_kwgs)
for _ in range(n_envs if n_envs is not None else cfg.eval.batch_size)
]
)
return env
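# Hypothetical usage sketch (not part of the original file): build a batched PushT environment
# from an OmegaConf config, assuming the `gym_pusht` package is installed. The config values
# below are illustrative only.
#   cfg = DictConfig(
#       {"env": {"name": "pusht", "task": "PushT-v0", "gym": {}},
#        "eval": {"batch_size": 4, "use_async_envs": False}}
#   )
#   env = make_env(cfg)  # gym.vector.SyncVectorEnv with 4 sub-environments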
|
lerobot/lerobot/common/envs/factory.py/0
|
{
"file_path": "lerobot/lerobot/common/envs/factory.py",
"repo_id": "lerobot",
"token_count": 744
}
| 171
|
"""
This file contains utilities for recording frames from cameras. For more info look at `OpenCVCamera` docstring.
"""
import argparse
import concurrent.futures
import math
import platform
import shutil
import threading
import time
from dataclasses import dataclass, replace
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
from PIL import Image
from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError
from lerobot.common.utils.utils import capture_timestamp_utc
from lerobot.scripts.control_robot import busy_wait
# Use 1 thread to avoid blocking the main thread. Especially useful during data collection
# when other threads are used to save the images.
cv2.setNumThreads(1)
# The maximum opencv device index depends on your operating system. For instance,
# if you have 3 cameras, they should be associated with indices 0, 1, and 2. This is the case
# on MacOS. However, on Ubuntu, the indices can be different, e.g. 6, 16, 23.
# When you change the USB port or reboot the computer, the operating system might
# treat the same cameras as new devices. Thus we select a higher bound to search indices.
MAX_OPENCV_INDEX = 60
def find_camera_indices(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX):
if platform.system() == "Linux":
# Linux uses camera ports
print("Linux detected. Finding available camera indices through scanning '/dev/video*' ports")
possible_camera_ids = []
for port in Path("/dev").glob("video*"):
camera_idx = int(str(port).replace("/dev/video", ""))
possible_camera_ids.append(camera_idx)
else:
print(
"Mac or Windows detected. Finding available camera indices through "
f"scanning all indices from 0 to {MAX_OPENCV_INDEX}"
)
possible_camera_ids = range(max_index_search_range)
camera_ids = []
for camera_idx in possible_camera_ids:
camera = cv2.VideoCapture(camera_idx)
is_open = camera.isOpened()
camera.release()
if is_open:
print(f"Camera found at index {camera_idx}")
camera_ids.append(camera_idx)
if raise_when_empty and len(camera_ids) == 0:
raise OSError(
"Not a single camera was detected. Try re-plugging, or re-installing `opencv2`, "
"or your camera driver, or make sure your camera is compatible with opencv2."
)
return camera_ids
def save_image(img_array, camera_index, frame_index, images_dir):
img = Image.fromarray(img_array)
path = images_dir / f"camera_{camera_index:02d}_frame_{frame_index:06d}.png"
path.parent.mkdir(parents=True, exist_ok=True)
img.save(str(path), quality=100)
def save_images_from_cameras(
images_dir: Path, camera_ids: list[int] | None = None, fps=None, width=None, height=None, record_time_s=2
):
if camera_ids is None:
camera_ids = find_camera_indices()
print("Connecting cameras")
cameras = []
for cam_idx in camera_ids:
camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height)
camera.connect()
print(
f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, "
f"height={camera.height}, color_mode={camera.color_mode})"
)
cameras.append(camera)
images_dir = Path(images_dir)
if images_dir.exists():
shutil.rmtree(
images_dir,
)
images_dir.mkdir(parents=True, exist_ok=True)
print(f"Saving images to {images_dir}")
frame_index = 0
start_time = time.perf_counter()
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
while True:
now = time.perf_counter()
for camera in cameras:
                # If we use async_read when fps is None, the loop will go full speed, and we will end up
# saving the same images from the cameras multiple times until the RAM/disk is full.
image = camera.read() if fps is None else camera.async_read()
executor.submit(
save_image,
image,
camera.camera_index,
frame_index,
images_dir,
)
if fps is not None:
dt_s = time.perf_counter() - now
busy_wait(1 / fps - dt_s)
if time.perf_counter() - start_time > record_time_s:
break
print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}")
frame_index += 1
print(f"Images have been saved to {images_dir}")
@dataclass
class OpenCVCameraConfig:
"""
Example of tested options for Intel Real Sense D405:
```python
OpenCVCameraConfig(30, 640, 480)
OpenCVCameraConfig(60, 640, 480)
OpenCVCameraConfig(90, 640, 480)
OpenCVCameraConfig(30, 1280, 720)
```
"""
fps: int | None = None
width: int | None = None
height: int | None = None
color_mode: str = "rgb"
def __post_init__(self):
if self.color_mode not in ["rgb", "bgr"]:
raise ValueError(
f"Expected color_mode values are 'rgb' or 'bgr', but {self.color_mode} is provided."
)
class OpenCVCamera:
"""
    The OpenCVCamera class allows efficiently recording images from cameras. It relies on OpenCV (`cv2`) to communicate
    with the cameras. Most cameras are compatible. For more info, see the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
    An OpenCVCamera instance requires a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera
    like a laptop webcam, the camera index is expected to be 0, but it might also be very different, and the camera index
    might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
    To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera:
```bash
python lerobot/common/robot_devices/cameras/opencv.py --images-dir outputs/images_from_opencv_cameras
```
When an OpenCVCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode
of the given camera will be used.
Example of usage:
```python
camera = OpenCVCamera(camera_index=0)
camera.connect()
color_image = camera.read()
# when done using the camera, consider disconnecting
camera.disconnect()
```
Example of changing default fps, width, height and color_mode:
```python
camera = OpenCVCamera(0, fps=30, width=1280, height=720)
    camera.connect()  # applies the settings, might error out if these settings are not compatible with the camera
    camera = OpenCVCamera(0, fps=90, width=640, height=480)
    camera.connect()
    camera = OpenCVCamera(0, fps=90, width=640, height=480, color_mode="bgr")
    camera.connect()
```
"""
def __init__(self, camera_index: int, config: OpenCVCameraConfig | None = None, **kwargs):
if config is None:
config = OpenCVCameraConfig()
# Overwrite config arguments using kwargs
config = replace(config, **kwargs)
self.camera_index = camera_index
self.fps = config.fps
self.width = config.width
self.height = config.height
self.color_mode = config.color_mode
self.camera = None
self.is_connected = False
self.thread = None
self.stop_event = None
self.color_image = None
self.logs = {}
def connect(self):
if self.is_connected:
raise RobotDeviceAlreadyConnectedError(f"Camera {self.camera_index} is already connected.")
# First create a temporary camera trying to access `camera_index`,
# and verify it is a valid camera by calling `isOpened`.
if platform.system() == "Linux":
# Linux uses ports for connecting to cameras
tmp_camera = cv2.VideoCapture(f"/dev/video{self.camera_index}")
else:
tmp_camera = cv2.VideoCapture(self.camera_index)
is_camera_open = tmp_camera.isOpened()
# Release camera to make it accessible for `find_camera_indices`
del tmp_camera
# If the camera doesn't work, display the camera indices corresponding to
# valid cameras.
if not is_camera_open:
# Verify that the provided `camera_index` is valid before printing the traceback
available_cam_ids = find_camera_indices()
if self.camera_index not in available_cam_ids:
raise ValueError(
f"`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. "
"To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/opencv.py`."
)
raise OSError(f"Can't access camera {self.camera_index}.")
# Secondly, create the camera that will be used downstream.
# Note: For some unknown reason, calling `isOpened` blocks the camera which then
# needs to be re-created.
if platform.system() == "Linux":
self.camera = cv2.VideoCapture(f"/dev/video{self.camera_index}")
else:
self.camera = cv2.VideoCapture(self.camera_index)
if self.fps is not None:
self.camera.set(cv2.CAP_PROP_FPS, self.fps)
if self.width is not None:
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
if self.height is not None:
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
actual_fps = self.camera.get(cv2.CAP_PROP_FPS)
actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)
actual_height = self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3):
raise OSError(
f"Can't set {self.fps=} for camera {self.camera_index}. Actual value is {actual_fps}."
)
if self.width is not None and self.width != actual_width:
raise OSError(
f"Can't set {self.width=} for camera {self.camera_index}. Actual value is {actual_width}."
)
if self.height is not None and self.height != actual_height:
raise OSError(
f"Can't set {self.height=} for camera {self.camera_index}. Actual value is {actual_height}."
)
self.fps = actual_fps
self.width = actual_width
self.height = actual_height
self.is_connected = True
def read(self, temporary_color_mode: str | None = None) -> np.ndarray:
"""Read a frame from the camera returned in the format (height, width, channels)
(e.g. (640, 480, 3)), contrarily to the pytorch format which is channel first.
Note: Reading a frame is done every `camera.fps` times per second, and it is blocking.
If you are reading data from other sensors, we advise to use `camera.async_read()` which is non blocking version of `camera.read()`.
"""
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
)
start_time = time.perf_counter()
ret, color_image = self.camera.read()
if not ret:
raise OSError(f"Can't capture color image from camera {self.camera_index}.")
requested_color_mode = self.color_mode if temporary_color_mode is None else temporary_color_mode
if requested_color_mode not in ["rgb", "bgr"]:
raise ValueError(
f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
)
        # OpenCV uses the BGR format (blue, green, red) by default for all operations, including displaying images.
        # However, deep learning frameworks such as LeRobot use the RGB format by default to train neural networks,
# so we convert the image color from BGR to RGB.
if requested_color_mode == "rgb":
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
h, w, _ = color_image.shape
if h != self.height or w != self.width:
raise OSError(
f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
)
# log the number of seconds it took to read the image
self.logs["delta_timestamp_s"] = time.perf_counter() - start_time
# log the utc time at which the image was received
self.logs["timestamp_utc"] = capture_timestamp_utc()
return color_image
def read_loop(self):
while self.stop_event is None or not self.stop_event.is_set():
self.color_image = self.read()
def async_read(self):
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
)
if self.thread is None:
self.stop_event = threading.Event()
self.thread = Thread(target=self.read_loop, args=())
self.thread.daemon = True
self.thread.start()
num_tries = 0
while self.color_image is None:
num_tries += 1
time.sleep(1 / self.fps)
if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()):
raise Exception(
"The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called."
)
return self.color_image
def disconnect(self):
if not self.is_connected:
raise RobotDeviceNotConnectedError(
f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first."
)
if self.thread is not None and self.thread.is_alive():
# wait for the thread to finish
self.stop_event.set()
self.thread.join()
self.thread = None
self.stop_event = None
self.camera.release()
self.camera = None
self.is_connected = False
def __del__(self):
if getattr(self, "is_connected", False):
self.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Save a few frames using `OpenCVCamera` for all cameras connected to the computer, or a selected subset."
)
parser.add_argument(
"--camera-ids",
type=int,
nargs="*",
default=None,
help="List of camera indices used to instantiate the `OpenCVCamera`. If not provided, find and use all available camera indices.",
)
parser.add_argument(
"--fps",
type=int,
default=None,
help="Set the number of frames recorded per seconds for all cameras. If not provided, use the default fps of each camera.",
)
parser.add_argument(
"--width",
        type=int,
default=None,
help="Set the width for all cameras. If not provided, use the default width of each camera.",
)
parser.add_argument(
"--height",
        type=int,
default=None,
help="Set the height for all cameras. If not provided, use the default height of each camera.",
)
parser.add_argument(
"--images-dir",
type=Path,
default="outputs/images_from_opencv_cameras",
help="Set directory to save a few frames for each camera.",
)
parser.add_argument(
"--record-time-s",
type=float,
default=2.0,
help="Set the number of seconds used to record the frames. By default, 2 seconds.",
)
args = parser.parse_args()
save_images_from_cameras(**vars(args))
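# Hypothetical usage sketch (not part of the original file): grab frames without blocking a
# control loop; the index and settings below are illustrative only.
#   camera = OpenCVCamera(0, fps=30, width=640, height=480)
#   camera.connect()
#   frame = camera.async_read()  # spawns the background read thread on the first call
#   camera.disconnect()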
|
lerobot/lerobot/common/robot_devices/cameras/opencv.py/0
|
{
"file_path": "lerobot/lerobot/common/robot_devices/cameras/opencv.py",
"repo_id": "lerobot",
"token_count": 6654
}
| 172
|
# @package _global_
fps: 10
env:
name: pusht
task: PushT-v0
image_size: 96
state_dim: 2
action_dim: 2
fps: ${fps}
episode_length: 300
gym:
obs_type: pixels_agent_pos
render_mode: rgb_array
visualization_width: 384
visualization_height: 384
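# Hypothetical usage sketch (not part of the original config): this is a Hydra environment
# config, typically selected through an `env=pusht` override; the entry-point script below
# is an assumption, not stated in this file.
#   python lerobot/scripts/eval.py env=pusht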
|
lerobot/lerobot/configs/env/pusht.yaml/0
|
{
"file_path": "lerobot/lerobot/configs/env/pusht.yaml",
"repo_id": "lerobot",
"token_count": 112
}
| 173
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Use this script to convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub,
or store it locally. LeRobot dataset format is lightweight, fast to load from, and does not require any
installation of neural net specific packages like pytorch, tensorflow, jax.
Example of how to download raw datasets, convert them into LeRobotDataset format, and push them to the hub:
```
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir data/pusht_raw \
--raw-format pusht_zarr \
--repo-id lerobot/pusht
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir data/xarm_lift_medium_raw \
--raw-format xarm_pkl \
--repo-id lerobot/xarm_lift_medium
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir data/aloha_sim_insertion_scripted_raw \
--raw-format aloha_hdf5 \
--repo-id lerobot/aloha_sim_insertion_scripted
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir data/umi_cup_in_the_wild_raw \
--raw-format umi_zarr \
--repo-id lerobot/umi_cup_in_the_wild
```
"""
import argparse
import json
import shutil
import warnings
from pathlib import Path
from typing import Any
import torch
from huggingface_hub import HfApi
from safetensors.torch import save_file
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
from lerobot.common.datasets.utils import create_branch, create_lerobot_dataset_card, flatten_dict
def get_from_raw_to_lerobot_format_fn(raw_format: str):
if raw_format == "pusht_zarr":
from lerobot.common.datasets.push_dataset_to_hub.pusht_zarr_format import from_raw_to_lerobot_format
elif raw_format == "umi_zarr":
from lerobot.common.datasets.push_dataset_to_hub.umi_zarr_format import from_raw_to_lerobot_format
elif raw_format == "aloha_hdf5":
from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import from_raw_to_lerobot_format
elif "openx_rlds" in raw_format:
from lerobot.common.datasets.push_dataset_to_hub.openx_rlds_format import from_raw_to_lerobot_format
elif raw_format == "dora_parquet":
from lerobot.common.datasets.push_dataset_to_hub.dora_parquet_format import from_raw_to_lerobot_format
elif raw_format == "xarm_pkl":
from lerobot.common.datasets.push_dataset_to_hub.xarm_pkl_format import from_raw_to_lerobot_format
elif raw_format == "cam_png":
from lerobot.common.datasets.push_dataset_to_hub.cam_png_format import from_raw_to_lerobot_format
else:
raise ValueError(
f"The selected {raw_format} can't be found. Did you add it to `lerobot/scripts/push_dataset_to_hub.py::get_from_raw_to_lerobot_format_fn`?"
)
return from_raw_to_lerobot_format
def save_meta_data(
info: dict[str, Any], stats: dict, episode_data_index: dict[str, list], meta_data_dir: Path
):
meta_data_dir.mkdir(parents=True, exist_ok=True)
# save info
info_path = meta_data_dir / "info.json"
with open(str(info_path), "w") as f:
json.dump(info, f, indent=4)
# save stats
stats_path = meta_data_dir / "stats.safetensors"
save_file(flatten_dict(stats), stats_path)
# save episode_data_index
episode_data_index = {key: torch.tensor(episode_data_index[key]) for key in episode_data_index}
ep_data_idx_path = meta_data_dir / "episode_data_index.safetensors"
save_file(episode_data_index, ep_data_idx_path)
def push_meta_data_to_hub(repo_id: str, meta_data_dir: str | Path, revision: str | None):
"""Expect all meta data files to be all stored in a single "meta_data" directory.
On the hugging face repositery, they will be uploaded in a "meta_data" directory at the root.
"""
api = HfApi()
api.upload_folder(
folder_path=meta_data_dir,
path_in_repo="meta_data",
repo_id=repo_id,
revision=revision,
repo_type="dataset",
)
def push_dataset_card_to_hub(
repo_id: str, revision: str | None, tags: list | None = None, text: str | None = None
):
"""Creates and pushes a LeRobotDataset Card with appropriate tags to easily find it on the hub."""
card = create_lerobot_dataset_card(tags=tags, text=text)
card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=revision)
def push_videos_to_hub(repo_id: str, videos_dir: str | Path, revision: str | None):
"""Expect mp4 files to be all stored in a single "videos" directory.
On the hugging face repositery, they will be uploaded in a "videos" directory at the root.
"""
api = HfApi()
api.upload_folder(
folder_path=videos_dir,
path_in_repo="videos",
repo_id=repo_id,
revision=revision,
repo_type="dataset",
allow_patterns="*.mp4",
)
def push_dataset_to_hub(
raw_dir: Path,
raw_format: str,
repo_id: str,
push_to_hub: bool = True,
local_dir: Path | None = None,
fps: int | None = None,
video: bool = True,
batch_size: int = 32,
num_workers: int = 8,
episodes: list[int] | None = None,
force_override: bool = False,
resume: bool = False,
cache_dir: Path = Path("/tmp"),
tests_data_dir: Path | None = None,
encoding: dict | None = None,
):
check_repo_id(repo_id)
user_id, dataset_id = repo_id.split("/")
# Robustify when `raw_dir` is str instead of Path
raw_dir = Path(raw_dir)
if not raw_dir.exists():
raise NotADirectoryError(
f"{raw_dir} does not exists. Check your paths or run this command to download an existing raw dataset on the hub: "
f"`python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py --raw-dir your/raw/dir --repo-id your/repo/id_raw`"
)
if local_dir:
# Robustify when `local_dir` is str instead of Path
local_dir = Path(local_dir)
        # Warn if local_dir isn't well formatted
if local_dir.parts[-2] != user_id or local_dir.parts[-1] != dataset_id:
warnings.warn(
f"`local_dir` ({local_dir}) doesn't contain a community or user id `/` the name of the dataset that match the `repo_id` (e.g. 'data/lerobot/pusht'). Following this naming convention is advised, but not mandatory.",
stacklevel=1,
)
# Check we don't override an existing `local_dir` by mistake
if local_dir.exists():
if force_override:
shutil.rmtree(local_dir)
elif not resume:
raise ValueError(f"`local_dir` already exists ({local_dir}). Use `--force-override 1`.")
meta_data_dir = local_dir / "meta_data"
videos_dir = local_dir / "videos"
else:
# Temporary directory used to store images, videos, meta_data
meta_data_dir = Path(cache_dir) / "meta_data"
videos_dir = Path(cache_dir) / "videos"
if raw_format is None:
# TODO(rcadene, adilzouitine): implement auto_find_raw_format
raise NotImplementedError()
# raw_format = auto_find_raw_format(raw_dir)
# convert dataset from original raw format to LeRobot format
from_raw_to_lerobot_format = get_from_raw_to_lerobot_format_fn(raw_format)
fmt_kwgs = {
"raw_dir": raw_dir,
"videos_dir": videos_dir,
"fps": fps,
"video": video,
"episodes": episodes,
"encoding": encoding,
}
if "openx_rlds." in raw_format:
# Support for official OXE dataset name inside `raw_format`.
        # For instance, `raw_format="openx_rlds"` uses the default formatting,
        # and `raw_format="openx_rlds.bridge_orig"` uses the bridge_orig formatting.
_, openx_dataset_name = raw_format.split(".")
print(f"Converting dataset [{openx_dataset_name}] from 'openx_rlds' to LeRobot format.")
fmt_kwgs["openx_dataset_name"] = openx_dataset_name
hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(**fmt_kwgs)
lerobot_dataset = LeRobotDataset.from_preloaded(
repo_id=repo_id,
hf_dataset=hf_dataset,
episode_data_index=episode_data_index,
info=info,
videos_dir=videos_dir,
)
stats = compute_stats(lerobot_dataset, batch_size, num_workers)
if local_dir:
        hf_dataset = hf_dataset.with_format(None)  # to remove transforms that can't be saved
hf_dataset.save_to_disk(str(local_dir / "train"))
if push_to_hub or local_dir:
# mandatory for upload
save_meta_data(info, stats, episode_data_index, meta_data_dir)
if push_to_hub:
hf_dataset.push_to_hub(repo_id, revision="main")
push_meta_data_to_hub(repo_id, meta_data_dir, revision="main")
push_dataset_card_to_hub(repo_id, revision="main")
if video:
push_videos_to_hub(repo_id, videos_dir, revision="main")
create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION)
if tests_data_dir:
# get the first episode
num_items_first_ep = episode_data_index["to"][0] - episode_data_index["from"][0]
test_hf_dataset = hf_dataset.select(range(num_items_first_ep))
episode_data_index = {k: v[:1] for k, v in episode_data_index.items()}
test_hf_dataset = test_hf_dataset.with_format(None)
test_hf_dataset.save_to_disk(str(tests_data_dir / repo_id / "train"))
tests_meta_data = tests_data_dir / repo_id / "meta_data"
save_meta_data(info, stats, episode_data_index, tests_meta_data)
# copy videos of first episode to tests directory
episode_index = 0
tests_videos_dir = tests_data_dir / repo_id / "videos"
tests_videos_dir.mkdir(parents=True, exist_ok=True)
for key in lerobot_dataset.video_frame_keys:
fname = f"{key}_episode_{episode_index:06d}.mp4"
shutil.copy(videos_dir / fname, tests_videos_dir / fname)
if local_dir is None:
# clear cache
shutil.rmtree(meta_data_dir)
shutil.rmtree(videos_dir)
return lerobot_dataset
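# Hypothetical usage sketch (not part of the original file): the conversion can also be driven
# programmatically; the paths and repo id below are illustrative only.
#   dataset = push_dataset_to_hub(
#       raw_dir=Path("data/pusht_raw"),
#       raw_format="pusht_zarr",
#       repo_id="lerobot/pusht",
#       push_to_hub=False,
#       local_dir=Path("data/lerobot/pusht"),
#   )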
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw-dir",
type=Path,
required=True,
help="Directory containing input raw datasets (e.g. `data/aloha_mobile_chair_raw` or `data/pusht_raw).",
)
# TODO(rcadene): add automatic detection of the format
parser.add_argument(
"--raw-format",
type=str,
required=True,
help="Dataset type (e.g. `pusht_zarr`, `umi_zarr`, `aloha_hdf5`, `xarm_pkl`, `dora_parquet`, `openx_rlds`).",
)
parser.add_argument(
"--repo-id",
type=str,
required=True,
help="Repositery identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
)
parser.add_argument(
"--local-dir",
type=Path,
help="When provided, writes the dataset converted to LeRobotDataset format in this directory (e.g. `data/lerobot/aloha_mobile_chair`).",
)
parser.add_argument(
"--push-to-hub",
type=int,
default=1,
help="Upload to hub.",
)
parser.add_argument(
"--fps",
type=int,
help="Frame rate used to collect videos. If not provided, use the default one specified in the code.",
)
parser.add_argument(
"--video",
type=int,
default=1,
help="Convert each episode of the raw dataset to an mp4 video. This option allows 60 times lower disk space consumption and 25 faster loading time during training.",
)
parser.add_argument(
"--batch-size",
type=int,
default=32,
help="Batch size loaded by DataLoader for computing the dataset statistics.",
)
parser.add_argument(
"--num-workers",
type=int,
default=8,
help="Number of processes of Dataloader for computing the dataset statistics.",
)
parser.add_argument(
"--episodes",
type=int,
nargs="*",
help="When provided, only converts the provided episodes (e.g `--episodes 2 3 4`). Useful to test the code on 1 episode.",
)
parser.add_argument(
"--force-override",
type=int,
default=0,
help="When set to 1, removes provided output directory if it already exists. By default, raises a ValueError exception.",
)
parser.add_argument(
"--resume",
type=int,
default=0,
help="When set to 1, resumes a previous run.",
)
parser.add_argument(
"--cache-dir",
type=Path,
required=False,
default="/tmp",
help="Directory to store the temporary videos and images generated while creating the dataset.",
)
parser.add_argument(
"--tests-data-dir",
type=Path,
help=(
"When provided, save tests artifacts into the given directory "
"(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})."
),
)
args = parser.parse_args()
push_dataset_to_hub(**vars(args))
if __name__ == "__main__":
main()
|
lerobot/lerobot/scripts/push_dataset_to_hub.py/0
|
{
"file_path": "lerobot/lerobot/scripts/push_dataset_to_hub.py",
"repo_id": "lerobot",
"token_count": 5879
}
| 174
|
version https://git-lfs.github.com/spec/v1
oid sha256:095c30bfe3c5da168c85aceef905e74e2142866332282965aa6812f6e6e48448
size 4344
|
lerobot/tests/data/lerobot/aloha_mobile_chair/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_mobile_chair/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 63
}
| 175
|
version https://git-lfs.github.com/spec/v1
oid sha256:c0b18566cbf59e399ea40f1630df12ffbbb9f73bbc733d1d4eba62d675b1fda5
size 247
|
lerobot/tests/data/lerobot/aloha_sim_insertion_human/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_human/train/state.json",
"repo_id": "lerobot",
"token_count": 66
}
| 176
|
version https://git-lfs.github.com/spec/v1
oid sha256:1441f982dcda008d470b3df1283431584e05838b69a374e5afc45483132baa41
size 683048
|
lerobot/tests/data/lerobot/aloha_static_coffee_new/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_coffee_new/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 63
}
| 177
|
version https://git-lfs.github.com/spec/v1
oid sha256:af0c9dfe9d1e8caa0f9b85ea64895ac7898d6a39414b00b6ced19955e8eceef6
size 982
|
lerobot/tests/data/lerobot/pusht_image/train/dataset_info.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/pusht_image/train/dataset_info.json",
"repo_id": "lerobot",
"token_count": 69
}
| 178
|
version https://git-lfs.github.com/spec/v1
oid sha256:a219f973d6535f40737265fd15d81944aabf8eb7527384d28c507926bfa89f25
size 912
|
lerobot/tests/data/lerobot/xarm_lift_medium_replay/train/dataset_info.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/xarm_lift_medium_replay/train/dataset_info.json",
"repo_id": "lerobot",
"token_count": 64
}
| 179
|
version https://git-lfs.github.com/spec/v1
oid sha256:1c5814b892ceeb0869ec2f1821ab2c5040cbc0f1351a89f5369c67bd137cfe48
size 105064
|
lerobot/tests/data/lerobot/xarm_push_medium_replay_image/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/xarm_push_medium_replay_image/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 66
}
| 180
|
version https://git-lfs.github.com/spec/v1
oid sha256:24150994c6959631dc081b43e4001a8664e13b194ac194a32100f7d3fd2c0d0f
size 85353
|
lerobot/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_1.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_1.safetensors",
"repo_id": "lerobot",
"token_count": 65
}
| 181
|
version https://git-lfs.github.com/spec/v1
oid sha256:c9007dd51c748db4ecd6d75e70bdcabf8c312454ac97bf6710895a12e7288557
size 31672
|
lerobot/tests/data/save_policy_to_safetensors/dora_aloha_real_act_real/grad_stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_policy_to_safetensors/dora_aloha_real_act_real/grad_stats.safetensors",
"repo_id": "lerobot",
"token_count": 62
}
| 182
|
"""
This file contains generic tests to ensure that nothing breaks if we modify the push_dataset_to_hub API.
Also, this file contains backward compatibility tests. Because they are slow and require downloading the raw datasets,
we skip them for now in our CI.
Example of how to run the backward compatibility tests locally:
```
DATA_DIR=tests/data python -m pytest --run-skipped tests/test_push_dataset_to_hub.py::test_push_dataset_to_hub_pusht_backward_compatibility
```
"""
from pathlib import Path
import numpy as np
import pytest
import torch
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.utils import save_images_concurrently
from lerobot.common.datasets.video_utils import encode_video_frames
from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub
from tests.utils import require_package_arg
def _mock_download_raw_pusht(raw_dir, num_frames=4, num_episodes=3):
import zarr
raw_dir.mkdir(parents=True, exist_ok=True)
zarr_path = raw_dir / "pusht_cchi_v7_replay.zarr"
store = zarr.DirectoryStore(zarr_path)
zarr_data = zarr.group(store=store)
zarr_data.create_dataset(
"data/action", shape=(num_frames, 1), chunks=(num_frames, 1), dtype=np.float32, overwrite=True
)
zarr_data.create_dataset(
"data/img",
shape=(num_frames, 96, 96, 3),
chunks=(num_frames, 96, 96, 3),
dtype=np.uint8,
overwrite=True,
)
zarr_data.create_dataset(
"data/n_contacts", shape=(num_frames, 2), chunks=(num_frames, 2), dtype=np.float32, overwrite=True
)
zarr_data.create_dataset(
"data/state", shape=(num_frames, 5), chunks=(num_frames, 5), dtype=np.float32, overwrite=True
)
zarr_data.create_dataset(
"data/keypoint", shape=(num_frames, 9, 2), chunks=(num_frames, 9, 2), dtype=np.float32, overwrite=True
)
zarr_data.create_dataset(
"meta/episode_ends", shape=(num_episodes,), chunks=(num_episodes,), dtype=np.int32, overwrite=True
)
zarr_data["data/action"][:] = np.random.randn(num_frames, 1)
zarr_data["data/img"][:] = np.random.randint(0, 255, size=(num_frames, 96, 96, 3), dtype=np.uint8)
zarr_data["data/n_contacts"][:] = np.random.randn(num_frames, 2)
zarr_data["data/state"][:] = np.random.randn(num_frames, 5)
zarr_data["data/keypoint"][:] = np.random.randn(num_frames, 9, 2)
zarr_data["meta/episode_ends"][:] = np.array([1, 3, 4])
store.close()
def _mock_download_raw_umi(raw_dir, num_frames=4, num_episodes=3):
import zarr
raw_dir.mkdir(parents=True, exist_ok=True)
zarr_path = raw_dir / "cup_in_the_wild.zarr"
store = zarr.DirectoryStore(zarr_path)
zarr_data = zarr.group(store=store)
zarr_data.create_dataset(
"data/camera0_rgb",
shape=(num_frames, 96, 96, 3),
chunks=(num_frames, 96, 96, 3),
dtype=np.uint8,
overwrite=True,
)
zarr_data.create_dataset(
"data/robot0_demo_end_pose",
shape=(num_frames, 5),
chunks=(num_frames, 5),
dtype=np.float32,
overwrite=True,
)
zarr_data.create_dataset(
"data/robot0_demo_start_pose",
shape=(num_frames, 5),
chunks=(num_frames, 5),
dtype=np.float32,
overwrite=True,
)
zarr_data.create_dataset(
"data/robot0_eef_pos", shape=(num_frames, 5), chunks=(num_frames, 5), dtype=np.float32, overwrite=True
)
zarr_data.create_dataset(
"data/robot0_eef_rot_axis_angle",
shape=(num_frames, 5),
chunks=(num_frames, 5),
dtype=np.float32,
overwrite=True,
)
zarr_data.create_dataset(
"data/robot0_gripper_width",
shape=(num_frames, 5),
chunks=(num_frames, 5),
dtype=np.float32,
overwrite=True,
)
zarr_data.create_dataset(
"meta/episode_ends", shape=(num_episodes,), chunks=(num_episodes,), dtype=np.int32, overwrite=True
)
zarr_data["data/camera0_rgb"][:] = np.random.randint(0, 255, size=(num_frames, 96, 96, 3), dtype=np.uint8)
zarr_data["data/robot0_demo_end_pose"][:] = np.random.randn(num_frames, 5)
zarr_data["data/robot0_demo_start_pose"][:] = np.random.randn(num_frames, 5)
zarr_data["data/robot0_eef_pos"][:] = np.random.randn(num_frames, 5)
zarr_data["data/robot0_eef_rot_axis_angle"][:] = np.random.randn(num_frames, 5)
zarr_data["data/robot0_gripper_width"][:] = np.random.randn(num_frames, 5)
zarr_data["meta/episode_ends"][:] = np.array([1, 3, 4])
store.close()
def _mock_download_raw_xarm(raw_dir, num_frames=4):
import pickle
dataset_dict = {
"observations": {
"rgb": np.random.randint(0, 255, size=(num_frames, 3, 84, 84), dtype=np.uint8),
"state": np.random.randn(num_frames, 4),
},
"actions": np.random.randn(num_frames, 3),
"rewards": np.random.randn(num_frames),
"masks": np.random.randn(num_frames),
"dones": np.array([False, True, True, True]),
}
raw_dir.mkdir(parents=True, exist_ok=True)
pkl_path = raw_dir / "buffer.pkl"
with open(pkl_path, "wb") as f:
pickle.dump(dataset_dict, f)
def _mock_download_raw_aloha(raw_dir, num_frames=6, num_episodes=3):
import h5py
for ep_idx in range(num_episodes):
raw_dir.mkdir(parents=True, exist_ok=True)
path_h5 = raw_dir / f"episode_{ep_idx}.hdf5"
with h5py.File(str(path_h5), "w") as f:
f.create_dataset("action", data=np.random.randn(num_frames // num_episodes, 14))
f.create_dataset("observations/qpos", data=np.random.randn(num_frames // num_episodes, 14))
f.create_dataset("observations/qvel", data=np.random.randn(num_frames // num_episodes, 14))
f.create_dataset(
"observations/images/top",
data=np.random.randint(
0, 255, size=(num_frames // num_episodes, 480, 640, 3), dtype=np.uint8
),
)
def _mock_download_raw_dora(raw_dir, num_frames=6, num_episodes=3, fps=30):
from datetime import datetime, timedelta, timezone
import pandas
def write_parquet(key, timestamps, values):
data = {
"timestamp_utc": timestamps,
key: values,
}
df = pandas.DataFrame(data)
raw_dir.mkdir(parents=True, exist_ok=True)
df.to_parquet(raw_dir / f"{key}.parquet", engine="pyarrow")
episode_indices = [None, None, -1, None, None, -1, None, None, -1]
episode_indices_mapping = [0, 0, 0, 1, 1, 1, 2, 2, 2]
frame_indices = [0, 1, -1, 0, 1, -1, 0, 1, -1]
cam_key = "observation.images.cam_high"
timestamps = []
actions = []
states = []
frames = []
    # `+ num_episodes` accounts for the buffer frames associated with episode_index=-1
for i, frame_idx in enumerate(frame_indices):
t_utc = datetime.now(timezone.utc) + timedelta(seconds=i / fps)
action = np.random.randn(21).tolist()
state = np.random.randn(21).tolist()
ep_idx = episode_indices_mapping[i]
frame = [{"path": f"videos/{cam_key}_episode_{ep_idx:06d}.mp4", "timestamp": frame_idx / fps}]
timestamps.append(t_utc)
actions.append(action)
states.append(state)
frames.append(frame)
write_parquet(cam_key, timestamps, frames)
write_parquet("observation.state", timestamps, states)
write_parquet("action", timestamps, actions)
write_parquet("episode_index", timestamps, episode_indices)
# write fake mp4 file for each episode
for ep_idx in range(num_episodes):
imgs_array = np.random.randint(0, 255, size=(num_frames // num_episodes, 480, 640, 3), dtype=np.uint8)
tmp_imgs_dir = raw_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
fname = f"{cam_key}_episode_{ep_idx:06d}.mp4"
video_path = raw_dir / "videos" / fname
encode_video_frames(tmp_imgs_dir, video_path, fps, vcodec="libx264")
def _mock_download_raw(raw_dir, repo_id):
if "wrist_gripper" in repo_id:
_mock_download_raw_dora(raw_dir)
elif "aloha" in repo_id:
_mock_download_raw_aloha(raw_dir)
elif "pusht" in repo_id:
_mock_download_raw_pusht(raw_dir)
elif "xarm" in repo_id:
_mock_download_raw_xarm(raw_dir)
elif "umi" in repo_id:
_mock_download_raw_umi(raw_dir)
else:
raise ValueError(repo_id)
def test_push_dataset_to_hub_invalid_repo_id(tmpdir):
with pytest.raises(ValueError):
push_dataset_to_hub(Path(tmpdir), "raw_format", "invalid_repo_id")
def test_push_dataset_to_hub_out_dir_force_override_false(tmpdir):
tmpdir = Path(tmpdir)
out_dir = tmpdir / "out"
raw_dir = tmpdir / "raw"
# mkdir to skip download
raw_dir.mkdir(parents=True, exist_ok=True)
with pytest.raises(ValueError):
push_dataset_to_hub(
raw_dir=raw_dir,
raw_format="some_format",
repo_id="user/dataset",
local_dir=out_dir,
force_override=False,
)
@pytest.mark.parametrize(
"required_packages, raw_format, repo_id, make_test_data",
[
(["gym_pusht"], "pusht_zarr", "lerobot/pusht", False),
(["gym_pusht"], "pusht_zarr", "lerobot/pusht", True),
(None, "xarm_pkl", "lerobot/xarm_lift_medium", False),
(None, "aloha_hdf5", "lerobot/aloha_sim_insertion_scripted", False),
(["imagecodecs"], "umi_zarr", "lerobot/umi_cup_in_the_wild", False),
(None, "dora_parquet", "cadene/wrist_gripper", False),
],
)
@require_package_arg
def test_push_dataset_to_hub_format(required_packages, tmpdir, raw_format, repo_id, make_test_data):
num_episodes = 3
tmpdir = Path(tmpdir)
raw_dir = tmpdir / f"{repo_id}_raw"
_mock_download_raw(raw_dir, repo_id)
local_dir = tmpdir / repo_id
lerobot_dataset = push_dataset_to_hub(
raw_dir=raw_dir,
raw_format=raw_format,
repo_id=repo_id,
push_to_hub=False,
local_dir=local_dir,
force_override=False,
cache_dir=tmpdir / "cache",
tests_data_dir=tmpdir / "tests/data" if make_test_data else None,
encoding={"vcodec": "libx264"},
)
# minimal generic tests on the local directory containing LeRobotDataset
assert (local_dir / "meta_data" / "info.json").exists()
assert (local_dir / "meta_data" / "stats.safetensors").exists()
assert (local_dir / "meta_data" / "episode_data_index.safetensors").exists()
for i in range(num_episodes):
for cam_key in lerobot_dataset.camera_keys:
assert (local_dir / "videos" / f"{cam_key}_episode_{i:06d}.mp4").exists()
assert (local_dir / "train" / "dataset_info.json").exists()
assert (local_dir / "train" / "state.json").exists()
assert len(list((local_dir / "train").glob("*.arrow"))) > 0
# minimal generic tests on the item
item = lerobot_dataset[0]
assert "index" in item
assert "episode_index" in item
assert "timestamp" in item
for cam_key in lerobot_dataset.camera_keys:
assert cam_key in item
if make_test_data:
# Check that only the first episode is selected.
test_dataset = LeRobotDataset(repo_id=repo_id, root=tmpdir / "tests/data")
num_frames = sum(
i == lerobot_dataset.hf_dataset["episode_index"][0]
for i in lerobot_dataset.hf_dataset["episode_index"]
).item()
assert (
test_dataset.hf_dataset["episode_index"]
== lerobot_dataset.hf_dataset["episode_index"][:num_frames]
)
for k in ["from", "to"]:
assert torch.equal(test_dataset.episode_data_index[k], lerobot_dataset.episode_data_index[k][:1])
@pytest.mark.parametrize(
"raw_format, repo_id",
[
# TODO(rcadene): add raw dataset test artifacts
("pusht_zarr", "lerobot/pusht"),
("xarm_pkl", "lerobot/xarm_lift_medium"),
("aloha_hdf5", "lerobot/aloha_sim_insertion_scripted"),
("umi_zarr", "lerobot/umi_cup_in_the_wild"),
("dora_parquet", "cadene/wrist_gripper"),
],
)
@pytest.mark.skip(
"Not compatible with our CI since it downloads raw datasets. Run with `DATA_DIR=tests/data python -m pytest --run-skipped tests/test_push_dataset_to_hub.py::test_push_dataset_to_hub_pusht_backward_compatibility`"
)
def test_push_dataset_to_hub_pusht_backward_compatibility(tmpdir, raw_format, repo_id):
_, dataset_id = repo_id.split("/")
tmpdir = Path(tmpdir)
raw_dir = tmpdir / f"{dataset_id}_raw"
local_dir = tmpdir / repo_id
push_dataset_to_hub(
raw_dir=raw_dir,
raw_format=raw_format,
repo_id=repo_id,
push_to_hub=False,
local_dir=local_dir,
force_override=False,
cache_dir=tmpdir / "cache",
episodes=[0],
)
ds_actual = LeRobotDataset(repo_id, root=tmpdir)
ds_reference = LeRobotDataset(repo_id)
assert len(ds_reference.hf_dataset) == len(ds_actual.hf_dataset)
def check_same_items(item1, item2):
assert item1.keys() == item2.keys(), "Keys mismatch"
for key in item1:
if isinstance(item1[key], torch.Tensor) and isinstance(item2[key], torch.Tensor):
assert torch.equal(item1[key], item2[key]), f"Mismatch found in key: {key}"
else:
assert item1[key] == item2[key], f"Mismatch found in key: {key}"
for i in range(len(ds_reference.hf_dataset)):
item_reference = ds_reference.hf_dataset[i]
item_actual = ds_actual.hf_dataset[i]
check_same_items(item_reference, item_actual)
|
lerobot/tests/test_push_dataset_to_hub.py/0
|
{
"file_path": "lerobot/tests/test_push_dataset_to_hub.py",
"repo_id": "lerobot",
"token_count": 6388
}
| 183
|
import os
import re
import shutil
from dataclasses import field
from pathlib import Path
from typing import Dict, List
import torch
from datasets import concatenate_datasets, load_from_disk
from wandb import Audio
def list_field(default=None, metadata=None):
return field(default_factory=lambda: default, metadata=metadata)
_RE_CHECKPOINT = re.compile(r"^checkpoint-(\d+)-epoch-(\d+)$")
CHECKPOINT_CODEC_PREFIX = "checkpoint"
_RE_CODEC_CHECKPOINT = re.compile(r"^checkpoint-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _RE_CHECKPOINT.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_RE_CHECKPOINT.search(x).groups()[0])))
def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[str]:
"""Helper function to sort saved checkpoints from oldest to newest."""
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint", logger=None) -> None:
"""Helper function to delete old checkpoints."""
if save_total_limit is None or save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = sorted_checkpoints(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix)
if len(checkpoints_sorted) <= save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint, ignore_errors=True)
def save_codec_checkpoint(output_dir, dataset, step):
checkpoint_path = f"{CHECKPOINT_CODEC_PREFIX}-{step}"
output_path = os.path.join(output_dir, checkpoint_path)
dataset.save_to_disk(output_path)
def load_codec_checkpoint(checkpoint_path):
dataset = load_from_disk(checkpoint_path)
return dataset
def sorted_codec_checkpoints(output_dir=None) -> List[str]:
"""Helper function to sort saved checkpoints from oldest to newest."""
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{CHECKPOINT_CODEC_PREFIX}-*")]
for path in glob_checkpoints:
regex_match = re.match(f".*{CHECKPOINT_CODEC_PREFIX}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def load_all_codec_checkpoints(output_dir=None) -> List[str]:
"""Helper function to load and concat all checkpoints."""
checkpoints_sorted = sorted_codec_checkpoints(output_dir=output_dir)
datasets = [load_from_disk(checkpoint) for checkpoint in checkpoints_sorted]
datasets = concatenate_datasets(datasets, axis=0)
return datasets
def get_last_codec_checkpoint_step(folder) -> int:
if not os.path.exists(folder) or not os.path.isdir(folder):
os.makedirs(folder, exist_ok=True)
return 0
content = os.listdir(folder)
checkpoints = [path for path in content if _RE_CODEC_CHECKPOINT.search(path) is not None]
if len(checkpoints) == 0:
return 0
last_checkpoint = os.path.join(
folder, max(checkpoints, key=lambda x: int(_RE_CODEC_CHECKPOINT.search(x).groups()[0]))
)
    # Find the number of steps from the saved checkpoint name pattern
pattern = r"checkpoint-(\d+)"
match = re.search(pattern, last_checkpoint)
cur_step = int(match.group(1))
return cur_step
def log_metric(
accelerator,
metrics: Dict,
train_time: float,
step: int,
epoch: int,
learning_rate: float = None,
prefix: str = "train",
):
"""Helper function to log all training/evaluation metrics with the correct prefixes and styling."""
log_metrics = {}
for k, v in metrics.items():
log_metrics[f"{prefix}/{k}"] = v
log_metrics[f"{prefix}/time"] = train_time
log_metrics[f"{prefix}/epoch"] = epoch
if learning_rate is not None:
log_metrics[f"{prefix}/learning_rate"] = learning_rate
accelerator.log(log_metrics, step=step)
def log_pred(
accelerator,
pred_descriptions: List[str],
pred_prompts: List[str],
transcriptions: List[str],
audios: List[torch.Tensor],
si_sdr_measures: List[float],
sampling_rate: int,
step: int,
prefix: str = "eval",
num_lines: int = 200000,
):
"""Helper function to log target/predicted transcriptions to weights and biases (wandb)."""
if accelerator.is_main_process:
wandb_tracker = accelerator.get_tracker("wandb")
# pretty name for current step: step 50000 -> step 50k
cur_step_pretty = f"{int(step // 1000)}k" if step > 1000 else step
prefix_pretty = prefix.replace("/", "-")
if si_sdr_measures is None:
# convert str data to a wandb compatible format
str_data = [
[pred_descriptions[i], pred_prompts[i], transcriptions[i]] for i in range(len(pred_descriptions))
]
# log as a table with the appropriate headers
wandb_tracker.log_table(
table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}",
columns=["Target descriptions", "Target prompts", "Predicted transcriptions"],
data=str_data[:num_lines],
step=step,
commit=False,
)
else:
# convert str data to a wandb compatible format
str_data = [
[pred_descriptions[i], pred_prompts[i], transcriptions[i], si_sdr_measures[i]]
for i in range(len(pred_descriptions))
]
# log as a table with the appropriate headers
wandb_tracker.log_table(
table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}",
columns=["Target descriptions", "Target prompts", "Predicted transcriptions", "Noise estimation"],
data=str_data[:num_lines],
step=step,
commit=False,
)
        # wandb can only log 100 audios per step
wandb_tracker.log(
{
"Speech samples": [
Audio(
audio,
caption=f"{pred_prompts[i]} --- DESCRIPTION: {pred_descriptions[i]}",
sample_rate=sampling_rate,
)
for (i, audio) in enumerate(audios[: min(len(audios), 100)])
]
},
step=step,
)
|
parler-tts/training/utils.py/0
|
{
"file_path": "parler-tts/training/utils.py",
"repo_id": "parler-tts",
"token_count": 3174
}
| 184
|
# Builds GPU docker image of PyTorch
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.8
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN apt-get update && \
apt-get install -y curl git wget software-properties-common git-lfs && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Install audio-related libraries
RUN apt-get update && \
apt install -y ffmpeg
RUN apt install -y libsndfile1-dev
RUN git lfs install
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
RUN conda create --name peft python=${PYTHON_VERSION} ipython jupyter pip
RUN python3 -m pip install --no-cache-dir --upgrade pip
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/peft/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env and install transformers + accelerate from source
RUN source activate peft && \
python3 -m pip install --no-cache-dir \
librosa \
"soundfile>=0.12.1" \
scipy \
git+https://github.com/huggingface/transformers \
git+https://github.com/huggingface/accelerate \
peft[test]@git+https://github.com/huggingface/peft
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
RUN echo "source activate peft" >> ~/.profile
# Activate the virtualenv
CMD ["/bin/bash"]
|
peft/docker/peft-cpu/Dockerfile/0
|
{
"file_path": "peft/docker/peft-cpu/Dockerfile",
"repo_id": "peft",
"token_count": 649
}
| 185
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Contribute to PEFT
We are happy to accept contributions to PEFT. If you plan to contribute, please read this to make the process as smooth as possible.
## Installation
For code contributions to PEFT, you should choose the ["source"](../install#source) installation method.
If you are new to creating a pull request, follow the [Creating a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) guide by GitHub.
## Tests and code quality checks
Regardless of the contribution type (unless it’s only about the docs), you should run tests and code quality checks before creating a PR to ensure your contribution doesn’t break anything and follows the project standards.
We provide a Makefile to execute the necessary tests. Run the following command for the unit tests:
```sh
make test
```
Run one of the following to either only check or check and fix code quality and style:
```sh
make quality # just check
make style # check and fix
```
You can also set up [`pre-commit`](https://pre-commit.com/) to run these fixes
automatically as Git commit hooks.
```bash
$ pip install pre-commit
$ pre-commit install
```
Running all the tests can take a couple of minutes, so during development it can be more efficient to only run tests specific to your change:
```sh
pytest tests/ -k <name-of-test>
```
This should finish much quicker and allow for faster iteration. However, you should still run the whole test suite before creating a PR because your change can inadvertently break tests that at first glance are unrelated.
If your change is specific to a hardware setting (e.g., it requires CUDA), take a look at [tests/test_gpu_examples.py](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/tests/test_gpu_examples.py) and [tests/test_common_gpu.py](https://github.com/huggingface/peft/blob/1c1c7fdaa6e6abaa53939b865dee1eded82ad032/tests/test_common_gpu.py) to see if it makes sense to add tests there. If your change could have an effect on saving and loading models, please run the tests with the `--regression` flag to trigger regression tests.
It can happen that while you’re working on your PR, the underlying code base changes due to other changes being merged. If that happens – especially when there is a merge conflict – please update your branch with the latest changes. This can be a merge or a rebase, and we'll squash and merge the PR once it’s ready.
## PR description
When opening a PR, please provide a nice description of the change you're proposing. If it relates to other issues or PRs, please reference them. Providing a good description not only helps the reviewers review your code better and faster, it can also be used later (as a basis) for the commit message which helps with long term maintenance of the project.
If your code makes some non-trivial changes, it may also be a good idea to add comments to the code to explain those changes. For example, if you had to iterate on your implementation multiple times because the most obvious way didn’t work, it’s a good indication that a code comment is needed.
## Bugfixes
Please give a description of the circumstances that led to the bug. If there is an existing issue, please link to it (e.g., “Resolves #12345”).
Ideally when a bugfix is provided, it should be accompanied by a test for the bug. The test should fail with the current code and pass with the bugfix. Add a comment to the test that references the issue or PR. Without a test, it is more difficult to prevent regressions in the future.
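As an illustration only (this is not an existing PEFT test; the checkpoint name, test name, and issue number are placeholders), a regression test accompanying a bugfix could look roughly like this:
```python
import torch
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model


def test_merge_and_unload_keeps_outputs_unchanged():
    # Regression test for #12345: merging an untrained LoRA adapter must not change the model outputs.
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
    input_ids = torch.tensor([[1, 2, 3]])
    with torch.no_grad():
        logits_base = model(input_ids).logits

    peft_model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))
    merged = peft_model.merge_and_unload()
    with torch.no_grad():
        logits_merged = merged(input_ids).logits

    assert torch.allclose(logits_base, logits_merged, atol=1e-5), "Merging changed the base model outputs"
```
The comment referencing the issue makes it easy to trace the test back to the original bug report if it ever starts failing again.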
## Add a new fine-tuning method
New parameter-efficient fine-tuning methods are developed all the time. If you would like to add a new and promising method to PEFT, please follow these steps.
1. Before you start to implement the new method, please open a GitHub issue with your proposal. This way, the maintainers can give you some early feedback.
2. Please add a link to the source (usually a paper) of the method. Some evidence should be provided that there is general interest in using the method. We will not add new methods that are freshly published but for which there is no evidence of demand.
3. When implementing the method, it makes sense to look for existing implementations that already exist as a guide. Moreover, when you structure your code, please take inspiration from the other PEFT methods. For example, if your method is similar to LoRA, it makes sense to structure your code similarly or even reuse some functions or classes where it makes sense (some code duplication is okay, but don’t overdo it).
4. Ideally, in addition to the implementation of the new method, there should also be examples (notebooks, scripts), documentation, and an extensive test suite that proves the method works with a variety of tasks. However, this can be more challenging, so it is acceptable to only provide the implementation and at least one working example. Documentation and tests can be added in follow-up PRs.
5. Once you have something that seems to be working, don’t hesitate to create a draft PR even if it’s not in a mergeable state yet. The maintainers are happy to give you feedback and guidance along the way.
## Add other features
It is best if you first open an issue on GitHub with a proposal to add the new feature. This way, you can discuss with the maintainers if it makes sense to add the feature before spending too much time on implementing it.
New features should generally be accompanied by tests and documentation or examples. Without the latter, users will have a hard time discovering your cool new feature.
Changes to the code should be implemented in a backward-compatible way. For example, existing code should continue to work the same way after the feature is merged.
|
peft/docs/source/developer_guides/contributing.md/0
|
{
"file_path": "peft/docs/source/developer_guides/contributing.md",
"repo_id": "peft",
"token_count": 1620
}
| 186
|
IDX=$1
PROMPT_IDX=$((IDX % 25))
CLASS_IDX=$((IDX % 30))
# Define the UNIQUE_TOKEN, CLASS_TOKENs, and SUBJECT_NAMES
UNIQUE_TOKEN="qwe"
SUBJECT_NAMES=(
"backpack" "backpack_dog" "bear_plushie" "berry_bowl" "can"
"candle" "cat" "cat2" "clock" "colorful_sneaker"
"dog" "dog2" "dog3" "dog5" "dog6"
"dog7" "dog8" "duck_toy" "fancy_boot" "grey_sloth_plushie"
"monster_toy" "pink_sunglasses" "poop_emoji" "rc_car" "red_cartoon"
"robot_toy" "shiny_sneaker" "teapot" "vase" "wolf_plushie"
)
CLASS_TOKENs=(
"backpack" "backpack" "stuffed animal" "bowl" "can"
"candle" "cat" "cat" "clock" "sneaker"
"dog" "dog" "dog" "dog" "dog"
"dog" "dog" "toy" "boot" "stuffed animal"
"toy" "glasses" "toy" "toy" "cartoon"
"toy" "sneaker" "teapot" "vase" "stuffed animal"
)
CLASS_TOKEN=${CLASS_TOKENs[$CLASS_IDX]}
SELECTED_SUBJECT=${SUBJECT_NAMES[$CLASS_IDX]}
if [[ $CLASS_IDX =~ ^(0|1|2|3|4|5|8|9|17|18|19|20|21|22|23|24|25|26|27|28|29)$ ]]; then
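# the listed class indices are inanimate subjects (objects and plushies); cats and dogs fall through to the else branch, which swaps in outfit prompts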
PROMPT_LIST=(
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a wheat field in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a tree and autumn leaves in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with the Eiffel Tower in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating on top of water."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating in an ocean of milk."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of green grass with sunflowers around it."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a mirror."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of the sidewalk in a crowded street."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a dirt road."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a white rug."
"a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
)
prompt_test_list=(
"a ${CLASS_TOKEN} in the jungle"
"a ${CLASS_TOKEN} in the snow"
"a ${CLASS_TOKEN} on the beach"
"a ${CLASS_TOKEN} on a cobblestone street"
"a ${CLASS_TOKEN} on top of pink fabric"
"a ${CLASS_TOKEN} on top of a wooden floor"
"a ${CLASS_TOKEN} with a city in the background"
"a ${CLASS_TOKEN} with a mountain in the background"
"a ${CLASS_TOKEN} with a blue house in the background"
"a ${CLASS_TOKEN} on top of a purple rug in a forest"
"a ${CLASS_TOKEN} with a wheat field in the background"
"a ${CLASS_TOKEN} with a tree and autumn leaves in the background"
"a ${CLASS_TOKEN} with the Eiffel Tower in the background"
"a ${CLASS_TOKEN} floating on top of water"
"a ${CLASS_TOKEN} floating in an ocean of milk"
"a ${CLASS_TOKEN} on top of green grass with sunflowers around it"
"a ${CLASS_TOKEN} on top of a mirror"
"a ${CLASS_TOKEN} on top of the sidewalk in a crowded street"
"a ${CLASS_TOKEN} on top of a dirt road"
"a ${CLASS_TOKEN} on top of a white rug"
"a red ${CLASS_TOKEN}"
"a purple ${CLASS_TOKEN}"
"a shiny ${CLASS_TOKEN}"
"a wet ${CLASS_TOKEN}"
"a cube shaped ${CLASS_TOKEN}"
)
else
PROMPT_LIST=(
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a red hat."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a santa hat."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a rainbow scarf."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a black top hat and a monocle."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a chef outfit."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a firefighter outfit."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a police outfit."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing pink glasses."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a yellow shirt."
"a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a purple wizard outfit."
"a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
"a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
)
prompt_test_list=(
"a ${CLASS_TOKEN} in the jungle"
"a ${CLASS_TOKEN} in the snow"
"a ${CLASS_TOKEN} on the beach"
"a ${CLASS_TOKEN} on a cobblestone street"
"a ${CLASS_TOKEN} on top of pink fabric"
"a ${CLASS_TOKEN} on top of a wooden floor"
"a ${CLASS_TOKEN} with a city in the background"
"a ${CLASS_TOKEN} with a mountain in the background"
"a ${CLASS_TOKEN} with a blue house in the background"
"a ${CLASS_TOKEN} on top of a purple rug in a forest"
"a ${CLASS_TOKEN} wearing a red hat"
"a ${CLASS_TOKEN} wearing a santa hat"
"a ${CLASS_TOKEN} wearing a rainbow scarf"
"a ${CLASS_TOKEN} wearing a black top hat and a monocle"
"a ${CLASS_TOKEN} in a chef outfit"
"a ${CLASS_TOKEN} in a firefighter outfit"
"a ${CLASS_TOKEN} in a police outfit"
"a ${CLASS_TOKEN} wearing pink glasses"
"a ${CLASS_TOKEN} wearing a yellow shirt"
"a ${CLASS_TOKEN} in a purple wizard outfit"
"a red ${CLASS_TOKEN}"
"a purple ${CLASS_TOKEN}"
"a shiny ${CLASS_TOKEN}"
"a wet ${CLASS_TOKEN}"
"a cube shaped ${CLASS_TOKEN}"
)
fi
VALIDATION_PROMPT=${PROMPT_LIST[@]}
INSTANCE_PROMPT="a photo of ${UNIQUE_TOKEN} ${CLASS_TOKEN}"
CLASS_PROMPT="a photo of ${CLASS_TOKEN}"
export MODEL_NAME="stabilityai/stable-diffusion-2-1"
# export MODEL_NAME="runwayml/stable-diffusion-v1-5"
PEFT_TYPE="boft"
BLOCK_NUM=8
BLOCK_SIZE=0
N_BUTTERFLY_FACTOR=1
export PROJECT_NAME="dreambooth_${PEFT_TYPE}"
export RUN_NAME="${SELECTED_SUBJECT}_${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}"
export INSTANCE_DIR="./data/dreambooth/dataset/${SELECTED_SUBJECT}"
export CLASS_DIR="./data/class_data/${CLASS_TOKEN}"
export OUTPUT_DIR="./data/output/${PEFT_TYPE}"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir="$CLASS_DIR" \
--output_dir=$OUTPUT_DIR \
--wandb_project_name=$PROJECT_NAME \
--wandb_run_name=$RUN_NAME \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="$INSTANCE_PROMPT" \
--validation_prompt="$VALIDATION_PROMPT" \
--class_prompt="$CLASS_PROMPT" \
--resolution=512 \
--train_batch_size=1 \
--num_dataloader_workers=2 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--use_boft \
--boft_block_num=$BLOCK_NUM \
--boft_block_size=$BLOCK_SIZE \
--boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \
--boft_dropout=0.1 \
--boft_bias="boft_only" \
--learning_rate=3e-5 \
--max_train_steps=1010 \
--checkpointing_steps=200 \
--validation_steps=200 \
--enable_xformers_memory_efficient_attention \
  --report_to="wandb"
|
peft/examples/boft_dreambooth/train_dreambooth.sh/0
|
{
"file_path": "peft/examples/boft_dreambooth/train_dreambooth.sh",
"repo_id": "peft",
"token_count": 3412
}
| 187
|
<jupyter_start><jupyter_text>Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes`In this tutorial we will cover how we can fine-tune large language models using the very recent `peft` library and `bitsandbytes` for loading large models in 8-bit.The fine-tuning method will rely on a recent method called "Low Rank Adapters" (LoRA): instead of fine-tuning the entire model, you just have to fine-tune these adapters and load them properly inside the model. After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started! Install requirementsFirst, run the cells below to install the requirements:<jupyter_code>!pip install -q bitsandbytes datasets accelerate
!pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git<jupyter_output>[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m76.3/76.3 MB[0m [31m10.3 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m462.8/462.8 KB[0m [31m25.4 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m199.7/199.7 KB[0m [31m25.5 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m190.3/190.3 KB[0m [31m23.1 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m213.0/213.0 KB[0m [31m26.4 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m132.0/132.0 KB[0m [31m18.5 MB/s[0m eta [36m0:00:00[0m
[2K [90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━[0m [32m140.6/140.6 KB[0m [31m20.2 MB/s[0m eta [36m0:00:00[0m
[?25h Installing build dependencies ... [?25l[?25hdone
Getting requirements to build wheel ... [?25l[?25hdone
Preparing metadata (pyproject.tom[...]<jupyter_text>Model loadingHere let's load the `opt-6.7b` model, its weights in half-precision (float16) are about 13GB on the Hub! If we load them in 8-bit we would require around 7GB of memory instead.<jupyter_code>import os
import torch
import torch.nn as nn
import bitsandbytes as bnb
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig
model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", quantization_config=BitsAndBytesConfig(load_in_8bit=True))
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b")<jupyter_output><empty_output><jupyter_text>Prepare model for trainingSome pre-processing needs to be done before training such an int8 model using `peft`, therefore let's import a utility function `prepare_model_for_kbit_training` that will: - Cast all the non `int8` modules to full precision (`fp32`) for stability - Add a `forward_hook` to the input embedding layer to enable gradient computation of the input hidden states - Enable gradient checkpointing for more memory-efficient training<jupyter_code>from peft import prepare_model_for_kbit_training
model = prepare_model_for_kbit_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
)
from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM"
)
model = get_peft_model(model, config)
print_trainable_parameters(model)<jupyter_output>trainable params: 8388608 || all params: 6666862592 || trainable%: 0.12582542214183376<jupyter_text>Training<jupyter_code>import transformers
from datasets import load_dataset
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
trainer = transformers.Trainer(
model=model,
train_dataset=data["train"],
args=transformers.TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=100,
max_steps=200,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir="outputs",
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False # silence the warnings. Please re-enable for inference!
trainer.train()<jupyter_output><empty_output><jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>from huggingface_hub import notebook_login
notebook_login()
model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True)<jupyter_output>Uploading the following files to ybelkada/opt-6.7b-lora: adapter_config.json,adapter_model.bin<jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
peft_model_id = "ybelkada/opt-6.7b-lora"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
config.base_model_name_or_path, return_dict=True, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
# Load the Lora model
model = PeftModel.from_pretrained(model, peft_model_id)<jupyter_output><empty_output><jupyter_text>InferenceYou can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`.<jupyter_code>batch = tokenizer("Two things are infinite: ", return_tensors="pt")
with torch.cuda.amp.autocast():
output_tokens = model.generate(**batch, max_new_tokens=50)
print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))<jupyter_output>/home/marc/anaconda3/envs/accelerate/lib/python3.10/site-packages/transformers/generation/utils.py:1448: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.
warnings.warn(
|
peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb/0
|
{
"file_path": "peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb",
"repo_id": "peft",
"token_count": 2755
}
| 188
|
<jupyter_start><jupyter_code>!pip install -q git+https://github.com/huggingface/transformers.git
!pip install -q git+https://github.com/huggingface/peft.git
!pip install -q git+https://github.com/huggingface/accelerate.git@main
!pip install huggingface_hub
!pip install bitsandbytes
!pip install SentencePiece
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from huggingface_hub import notebook_login
import torch
notebook_login()
from peft import PeftModel
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig, BitsAndBytesConfig
model_name = "decapoda-research/llama-7b-hf"
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = LlamaForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", use_auth_token=True)
%%time
model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b", adapter_name="eng_alpaca")
%%time
model.load_adapter("22h/cabrita-lora-v0-1", adapter_name="portuguese_alpaca")
model
model.to("cuda")
import torch
device = "cuda"
def generate_prompt(instruction, input=None):
if input:
return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
else:
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""
def evaluate(
instruction,
input=None,
temperature=0.1,
top_p=0.75,
top_k=40,
num_beams=4,
max_new_tokens=256,
**kwargs,
):
prompt = generate_prompt(instruction, input)
inputs = tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].to(device)
generation_config = GenerationConfig(
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
no_repeat_ngram_size=3,
**kwargs,
)
with torch.no_grad():
generation_output = model.generate(
input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=max_new_tokens,
)
s = generation_output.sequences[0]
output = tokenizer.decode(s)
return output.split("### Response:")[1].strip()
%%time
model.set_adapter("eng_alpaca")
instruction = "Tell me about alpacas."
print(evaluate(instruction))
%%time
model.set_adapter("portuguese_alpaca")
instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa."
print(evaluate(instruction))
with model.disable_adapter():
instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa."
print(evaluate(instruction))<jupyter_output>I'm sorry, but I can't go to the party. I'm sick. I have a cold. I don't feel well. I need to stay at home and rest.
I have a lot of homework to do. My dog ate my homework. My homework is too hard. I didn't have time to do it. It's too late. I forgot about it.
My parents won't let me go. My parents are out of town. They're on vacation. They have to work. They are sick. They need to take care of my brother.
They're not home. They went to the grocery store. They took the car to the mechanic. They had to go to a meeting. They were in a hurry. They forgot about me.
Their car broke down. Their car ran out of gas. They got a flat tire. They couldn't find a parking space. They didn' t have enough money. They lost their wallet.
It's raining. The roads are icy. There's a blizzard. There are too many cars on the road. There was an accident.
|
peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb/0
|
{
"file_path": "peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb",
"repo_id": "peft",
"token_count": 1344
}
| 189
|
import argparse
import json
import os
from datetime import date
from pathlib import Path
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument(
"--slack_channel_name",
default="peft-ci-daily",
)
def main(slack_channel_name=None):
failed = []
passed = []
group_info = []
total_num_failed = 0
    empty_file = len(list(Path().glob("*.log"))) == 0
total_empty_files = []
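    # each *.log file is expected to hold one JSON record per line (pytest report-log style), one record per test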
for log in Path().glob("*.log"):
section_num_failed = 0
i = 0
with open(log) as f:
for line in f:
line = json.loads(line)
i += 1
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
else:
passed.append([test, duration, log.name.split("_")[0]])
empty_file = i == 0
group_info.append([str(log), section_num_failed, failed])
total_empty_files.append(empty_file)
os.remove(log)
failed = []
text = (
"🌞 There were no failures!"
if not any(total_empty_files)
else "Something went wrong there is at least one empty file - please check GH action results."
)
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
}
message = ""
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the {} PEFT scheduled tests.".format(os.environ.get("TEST_TYPE", "")),
},
},
]
if total_num_failed > 0:
for i, (name, num_failed, failed_tests) in enumerate(group_info):
if num_failed > 0:
if num_failed == 1:
message += f"*{name}: {num_failed} failed test*\n"
else:
message += f"*{name}: {num_failed} failed tests*\n"
failed_table = []
for test in failed_tests:
failed_table.append(test[0].split("::"))
failed_table = tabulate(
failed_table,
headers=["Test Location", "Test Case", "Test Name"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12, 12, 12],
)
message += "\n```\n" + failed_table + "\n```"
if total_empty_files[i]:
message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n"
print(f"### {message}")
else:
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/peft/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
|
peft/scripts/log_reports.py/0
|
{
"file_path": "peft/scripts/log_reports.py",
"repo_id": "peft",
"token_count": 2521
}
| 190
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .config import AdaLoraConfig
from .gptq import SVDQuantLinear
from .layer import AdaLoraLayer, RankAllocator, SVDLinear
from .model import AdaLoraModel
__all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"]
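# Lazily resolve the bitsandbytes-backed classes so that importing this module does not require bitsandbytes to be installed.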
def __getattr__(name):
if (name == "SVDLinear8bitLt") and is_bnb_available():
from .bnb import SVDLinear8bitLt
return SVDLinear8bitLt
if (name == "SVDLinear4bit") and is_bnb_4bit_available():
from .bnb import SVDLinear4bit
return SVDLinear4bit
raise AttributeError(f"module {__name__} has no attribute {name}")
|
peft/src/peft/tuners/adalora/__init__.py/0
|
{
"file_path": "peft/src/peft/tuners/adalora/__init__.py",
"repo_id": "peft",
"token_count": 429
}
| 191
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The implementation is based on "Parameter-Efficient Orthogonal Finetuning
# via Butterfly Factorization" (https://arxiv.org/abs/2311.06243) in ICLR 2024.
from __future__ import annotations
import math
import os
import warnings
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
_FBD_CUDA = None
# this function is a 1:1 copy from accelerate
@contextmanager
def patch_environment(**kwargs):
"""
A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
Will convert the values in `kwargs` to strings and upper-case all the keys.
Example:
```python
>>> import os
>>> from accelerate.utils import patch_environment
>>> with patch_environment(FOO="bar"):
... print(os.environ["FOO"]) # prints "bar"
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
existing_vars = {}
for key, value in kwargs.items():
key = key.upper()
if key in os.environ:
existing_vars[key] = os.environ[key]
os.environ[key] = str(value)
yield
for key in kwargs:
key = key.upper()
if key in existing_vars:
# restore previous value
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
def get_fbd_cuda():
global _FBD_CUDA
if _FBD_CUDA is not None:
return _FBD_CUDA
# This import initializes cuda context and should thus be local, see issue 1877
from torch.utils.cpp_extension import load
curr_dir = os.path.dirname(__file__)
# need ninja to build the extension
try:
with patch_environment(CC="gcc", CXX="gcc"):
fbd_cuda = load(
name="fbd_cuda",
sources=[f"{curr_dir}/fbd/fbd_cuda.cpp", f"{curr_dir}/fbd/fbd_cuda_kernel.cu"],
verbose=True,
# build_directory='/tmp/' # for debugging
)
# extra_cuda_cflags = ['-std=c++14', '-ccbin=$$(which gcc-7)']) # cuda10.2 is not compatible with gcc9. Specify gcc 7
except Exception as e:
warnings.warn(f"Failed to load the CUDA extension: {e}, check if ninja is available.")
warnings.warn("Setting boft_n_butterfly_factor to 1 to speed up the finetuning process.")
fbd_cuda = None
_FBD_CUDA = fbd_cuda
return _FBD_CUDA
class FastBlockDiag(Function):
"""
Implements a custom autograd Function for a fast block diagonal operation using CUDA.
This function is optimized for 4D tensors where the last two dimensions are equal, representing block diagonal
matrices for efficient computation on CUDA devices.
"""
@staticmethod
def forward(ctx, input):
"""
The forward method for FastBlockDiag.
Computes the block diagonal operation on the input tensor using a CUDA-optimized function. This method assumes
that the input is a 4D tensor where the last two dimensions are equal, which represent the blocks to be
diagonalized.
Parameters:
ctx: A context object that can be used to stash information for backward computation.
input (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size,
`D` represents one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the
size of the square blocks along the last two dimensions (In BOFT, the block size).
Returns:
Tensor: The resulting tensor after applying the block diagonal operation,
will have the shape (N, DxH, DxH).
"""
output = get_fbd_cuda().forward(input)[0]
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad_input = get_fbd_cuda().backward(grad_output, input)[0]
return grad_input
class MultiplicativeDropoutLayer(nn.Module):
"""
Implements the multiplicative dropout layer for BOFT.
"""
def __init__(self, p=0.0):
"""
Initializes the multiplicative dropout layer.
Parameters:
p (float): The probability of dropping out a block. Defaults to 0.0.
"""
super().__init__()
self.p = p
def forward(self, x):
"""
Applies multiplicative dropout to the input tensor.
Parameters:
x (Tensor): The input tensor of shape (N, D, H, H), where `N` is the batch size, `D` represents
one additional dimension (In BOFT, the number of BOFT blocks), and `H` is the size of the square
blocks along the last two dimensions (In BOFT, the block size).
"""
if self.training:
# Ensure the last two dimensions are the same
if x.shape[-1] != x.shape[-2]:
raise ValueError("The last two dimensions of input should be the same!")
N, D, H, _ = x.shape
# Randomly select one from N
n_random = torch.randint(0, N, (1,)).item()
# Create a mask with 1s for matrices to be replaced with identity and 0s otherwise
num_to_replace = int(self.p * D)
num_zeros = D - num_to_replace
# Generate a flat tensor with desired number of 1s and 0s
mask = torch.cat([torch.ones(num_to_replace, device=x.device), torch.zeros(num_zeros, device=x.device)])
# Shuffle and reshape the mask
mask = mask[torch.randperm(D)].view(1, D, 1, 1)
full_mask = torch.zeros(N, D, 1, 1, device=x.device)
full_mask[n_random] = mask
# Use the mask to combine original matrices and identity matrices
eye_matrix = torch.eye(H, device=x.device).repeat(N, D, 1, 1)
x = (1 - full_mask) * x + full_mask * eye_matrix
return x
class BOFTLayer(BaseTunerLayer):
"""
Implements the BOFT layer.
"""
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("boft_R", "boft_s")
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("boft_block_size", "boft_block_num", "boft_dropout")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
"""
Initializes the BOFT layer.
        Note: currently only linear and convolutional layers are supported, with further support for other layer types
        to be added soon.
Parameters:
base_layer: the pretrained model layer
"""
self.base_layer = base_layer
self.boft_block_size = {}
self.boft_block_num = {}
self.boft_dropout = nn.ModuleDict({})
self.boft_R = nn.ParameterDict({})
self.boft_s = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, nn.Conv2d):
in_features, out_features = base_layer.in_channels, base_layer.out_channels
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
self.in_features = in_features
self.out_features = out_features
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
# Ignore the case where the adapter is not in the layer
return
warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.")
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn("Scaling operation for BOFT not supported! Automatically set scale to 1.")
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
warnings.warn("Unscaling operation for BOFT not supported! Keeping scale to 1.")
def update_layer(
self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
):
"""
Update the linear layer with trainable BOFT weights. Override for other layer types.
"""
# to be consistent with the paper notation
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(
f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor+1} to be a positive integer number."
)
# Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
if boft_block_size == 0 and boft_block_num != 0:
if self.in_features % boft_block_num != 0:
raise ValueError(
f"in_features ({self.in_features}) must be divisible by boft_block_num ({boft_block_num})!"
)
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(
f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_num ({boft_block_num})!"
)
if boft_block_num % (2**boft_n_butterfly_factor) != 0:
raise ValueError(
f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor+1})!"
)
boft_block_size = int(self.in_features // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if self.in_features % boft_block_size != 0:
raise ValueError(
f"in_features ({self.in_features}) must be divisible by boft_block_size ({boft_block_size})!"
)
if boft_n_butterfly_factor != 0:
if self.in_features < (boft_block_size * (2**boft_n_butterfly_factor)):
raise ValueError(
f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
)
if self.in_features % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
raise ValueError(
f"Invalid combination of in_features ({self.in_features}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
)
boft_block_num = int(self.in_features // boft_block_size)
else:
raise ValueError(
f"You can only specify either boft_block_size ({boft_block_size}) or boft_block_num ({boft_block_num}), but not both simultaneously or setting both"
"to be 0, because boft_block_size x boft_block_num != in_features."
)
# In OFT you can specify the number of blocks to be 1
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!")
if boft_block_size % 2 != 0:
raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!")
# If there is no butterfly factor, then permutation matrix P will be an identity matrix.
P = torch.empty((boft_n_butterfly_factor + 1, self.in_features, self.in_features))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(
self.in_features, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor
)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer("boft_P", P)
self.boft_R[adapter_name] = nn.Parameter(
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
)
self.boft_s[adapter_name] = nn.Parameter(torch.ones(int(self.out_features), 1))
self.reset_boft_parameters(adapter_name, init_weights)
# set the boft block size and number
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def reset_boft_parameters(self, adapter_name, init_weights):
"""
Reset the BOFT parameters.
"""
if init_weights is False:
nn.init.normal_(self.boft_R[adapter_name], mean=0.0, std=0.1)
nn.init.normal_(self.boft_s[adapter_name], mean=1.0, std=0.1)
return
if adapter_name in self.boft_R.keys():
if init_weights is True:
# initialize R to zero
nn.init.zeros_(self.boft_R[adapter_name])
nn.init.ones_(self.boft_s[adapter_name])
else:
raise ValueError(f"Unknown initialization {init_weights=}")
def perm2mat(self, indices):
"""
Convert permutation indices to permutation matrix.
Args:
indices: A list of indices representing the permutation.
"""
# Number of indices determines the size of the square matrix
n = len(indices)
# Initialize a matrix of zeros
perm_mat = torch.zeros((n, n))
# Set the 1s according to the indices
for i, idx in enumerate(indices):
perm_mat[i, idx] = 1
return perm_mat
def block_butterfly_perm(self, n, b, r=3, n_butterfly_factor=1):
"""
Define the permutation matrix for the block butterfly permutation.
Args:
n: size of the permutation matrix
b: desired number of blocks after multiplying with the permutation matrix
r: base block size of the block diagonal matrix, e.g. 2x2, 3x3, 5x5 etc.
"""
if n_butterfly_factor == 0:
return torch.arange(n)
if b * r * 2 > n:
raise ValueError("Invalid number of blocks!")
block_size = int(n // b)
indices = torch.arange(n)
def sort_block(b, r):
step = b / r
initial_order = torch.arange(b)
sorted_order = torch.empty(b, dtype=torch.long)
evens = torch.arange(0, step, 2)
odds = torch.arange(1, step, 2)
sorted_seq = torch.cat((evens, odds), dim=0)
for i, pos in enumerate(sorted_seq):
sorted_order[int(i * r) : int(i * r + r)] = initial_order[int(pos * r) : int(pos * r + r)]
return sorted_order
sorted_order = sort_block(block_size, r)
for i in range(0, n, block_size):
block_end = i + block_size
tmp_indices = indices[i:block_end]
indices[i:block_end] = tmp_indices[sorted_order]
return indices
def cayley_batch(self, data):
"""
Perform the Cayley parametrization on a batch of skew-symmetric matrices.
Args:
data: A batch of skew-symmetric matrices of shape (b, r, c).
"""
b, r, c = data.shape
# Ensure the input matrix is skew-symmetric
skew_mat = 0.5 * (data - data.transpose(1, 2))
id_mat = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c)
# Perform the Cayley parametrization
Q = torch.linalg.solve(id_mat + skew_mat, id_mat - skew_mat, left=False)
return Q
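# Side note (an illustrative sketch, not part of the original module): the Cayley transform maps any
# skew-symmetric matrix S to an orthogonal matrix Q = (I - S)(I + S)^{-1}, which is why BOFT can
# parametrize rotations with unconstrained trainable blocks. The hypothetical helper below demonstrates
# that property using the same construction as `cayley_batch`.
def _cayley_orthogonality_demo():
    torch.manual_seed(0)
    b, r = 2, 4
    data = torch.randn(b, r, r)
    skew = 0.5 * (data - data.transpose(1, 2))
    eye = torch.eye(r).unsqueeze(0).expand(b, r, r)
    q = torch.linalg.solve(eye + skew, eye - skew, left=False)
    # Q is orthogonal up to numerical precision: Q @ Q^T == I
    assert torch.allclose(q @ q.transpose(1, 2), eye, atol=1e-5)
    return q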
class Linear(nn.Module, BOFTLayer):
"""
BOFT implemented in a dense layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
boft_block_size: int = 8,
boft_block_num: int = 0,
boft_n_butterfly_factor: int = 0,
boft_dropout: float = 0.1,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: Union[bool, str] = True,
is_target_conv_1d_layer: bool = False,
**kwargs,
) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
# Attempt to load the CUDA extension during model initialization
if not get_fbd_cuda():
self.fbd_cuda_available = False
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
self.update_layer(
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
)
self.is_target_conv_1d_layer = is_target_conv_1d_layer
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.base_layer.weight.data = orig_weight.contiguous()
else:
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = orig_weight * boft_s
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = torch.transpose(orig_weight, 0, 1)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = torch.transpose(orig_weight, 0, 1)
self.get_base_layer().weight.data = orig_weight * (1 / boft_s)
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return butterfly_oft_mat, boft_s
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(self.in_features, device=x.device, dtype=previous_dtype)
boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=previous_dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter]
dropout = self.boft_dropout[active_adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
# The BOFT author's cayley_batch, dropout and FastBlockDiag ONLY return fp32 outputs.
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.get_base_layer().weight.data.dtype)
orig_weight = self.get_base_layer().weight.data
orig_weight = torch.transpose(orig_weight, 0, 1)
boft_rotation = boft_rotation.to(previous_dtype)
orig_weight = orig_weight.to(previous_dtype)
rotated_weight = torch.mm(boft_rotation, orig_weight)
rotated_weight = torch.transpose(rotated_weight, 0, 1)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.to(previous_dtype)
if self.base_layer.bias is not None:
self.base_layer.bias = self.base_layer.bias.to(previous_dtype)
result = F.linear(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "boft." + rep
class Conv2d(nn.Module, BOFTLayer):
"""
BOFT implemented in a Conv2d layer.
"""
def __init__(
self,
base_layer: nn.Module,
adapter_name: str,
boft_block_size: int = 8,
boft_block_num: int = 0,
boft_n_butterfly_factor: int = 0,
boft_dropout: float = 0.1,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
BOFTLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
# Attempt to load the CUDA extension during model initialization
if not get_fbd_cuda():
self.fbd_cuda_available = False
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
boft_n_butterfly_factor = 1
else:
self.fbd_cuda_available = True
self.update_layer(
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
)
def update_layer(
self, adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
):
"""
Update the conv2d layer with trainable BOFT weights.
"""
# to be consistent with the paper notation
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
if boft_n_butterfly_factor < 0:
raise ValueError(
f"You can only specify boft_n_butterfly_factor {boft_n_butterfly_factor+1} to be a positive integer number."
)
# Initialize the MultiplicativeDropoutLayer for boft_dropout > 0.0.
if boft_dropout > 0.0:
boft_dropout_layer = MultiplicativeDropoutLayer(p=boft_dropout)
else:
boft_dropout_layer = nn.Identity()
self.boft_dropout.update(nn.ModuleDict({adapter_name: boft_dropout_layer}))
# layer information from the base layer
base_layer = self.get_base_layer()
conv_filter_dim = self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0]
# Initialize the BOFT parameters.
if not (boft_block_size != 0) ^ (boft_block_num != 0):
raise ValueError(
f"You can only specify either boft_block_size ({boft_block_size}) or boft_block_num ({boft_block_num}), but not both simultaneously, because boft_block_size x boft_block_num != in_features."
)
if boft_block_size == 0 and boft_block_num != 0:
if conv_filter_dim % boft_block_num != 0:
raise ValueError(
f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_num ({boft_block_num})!"
)
if boft_n_butterfly_factor != 0:
if boft_n_butterfly_factor > int(math.log2(boft_block_num)):
raise ValueError(
f"Invalid combination of boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_num ({boft_block_num})!"
)
if boft_block_num % (2**boft_n_butterfly_factor) != 0:
raise ValueError(
f"boft_block_num ({boft_block_num}) must be a multiple of 2 raised to the power of boft_n_butterfly_factor ({boft_n_butterfly_factor+1})!"
)
boft_block_size = int(conv_filter_dim // boft_block_num)
elif boft_block_size != 0 and boft_block_num == 0:
if conv_filter_dim % boft_block_size != 0:
raise ValueError(
f"Convolutional kernel dimension ({conv_filter_dim}) must be divisible by boft_block_size ({boft_block_size})!"
)
if boft_n_butterfly_factor != 0:
if conv_filter_dim < (boft_block_size * (2**boft_n_butterfly_factor)):
raise ValueError(
f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
)
if conv_filter_dim % (boft_block_size * (2**boft_n_butterfly_factor)) != 0:
raise ValueError(
f"Invalid combination of convolutional kernel dimension ({conv_filter_dim}), boft_n_butterfly_factor ({boft_n_butterfly_factor+1}) and boft_block_size ({boft_block_size})!"
)
boft_block_num = int(conv_filter_dim // boft_block_size)
else:
raise ValueError("Unknown error!")
# In OFT you can specify the number of blocks to be 1
if boft_n_butterfly_factor != 0:
if boft_block_num % 2 != 0:
raise ValueError(f"boft_block_num ({boft_block_num}) must be an even number!")
if boft_block_size % 2 != 0:
raise ValueError(f"boft_block_size ({boft_block_size}) must be an even number!")
# If there is no butterfly factor, then permutation matrix P will be an identity matrix.
P = torch.empty((boft_n_butterfly_factor + 1, conv_filter_dim, conv_filter_dim))
for i in range(boft_n_butterfly_factor + 1):
perm = self.block_butterfly_perm(
conv_filter_dim, int(boft_block_num / (2 ** (i))), int(boft_block_size / 2), boft_n_butterfly_factor
)
perm_mat = self.perm2mat(perm)
P[i] = perm_mat
self.register_buffer("boft_P", P)
self.boft_R[adapter_name] = nn.Parameter(
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
)
self.boft_s[adapter_name] = nn.Parameter(torch.ones(1, int(self.out_features)))
self.reset_boft_parameters(adapter_name, init_weights)
# set the boft block size and number
self.boft_block_size[adapter_name] = boft_block_size
self.boft_block_num[adapter_name] = boft_block_num
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.boft_R.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = orig_weight.view(
self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], self.out_features
)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
)
self.base_layer.weight.data = orig_weight.contiguous()
else:
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = base_layer.weight.data.clone()
orig_weight = orig_weight.view(
self.in_features * base_layer.kernel_size[0] * base_layer.kernel_size[0], self.out_features
)
orig_weight = torch.mm(butterfly_oft_mat, orig_weight)
orig_weight = orig_weight * boft_s
orig_weight = orig_weight.view(
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
)
self.base_layer.weight.data = orig_weight.contiguous()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.boft_R.keys():
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
orig_weight = self.get_base_layer().weight.data.clone()
orig_weight = orig_weight.view(
self.in_features * self.get_base_layer().kernel_size[0] * self.get_base_layer().kernel_size[0],
self.out_features,
)
orig_weight = torch.mm(butterfly_oft_mat.t(), orig_weight)
orig_weight = orig_weight * (1 / boft_s)
orig_weight = orig_weight.view(
self.out_features,
self.in_features,
self.get_base_layer().kernel_size[0],
self.get_base_layer().kernel_size[0],
)
self.get_base_layer().weight.data = orig_weight
def get_delta_weight(self, adapter) -> tuple[torch.Tensor, torch.Tensor]:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
boft_R = self.boft_R[adapter]
boft_s = self.boft_s[adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
return butterfly_oft_mat, boft_s
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
boft_rotation = torch.eye(
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
device=x.device,
dtype=x.dtype,
)
boft_scale = torch.ones((1, int(self.out_features)), device=x.device, dtype=x.dtype)
for active_adapter in self.active_adapters:
if active_adapter not in self.boft_R.keys():
continue
boft_R = self.boft_R[active_adapter]
boft_s = self.boft_s[active_adapter]
dropout = self.boft_dropout[active_adapter]
N, D, H, _ = boft_R.shape
boft_R = boft_R.view(N * D, H, H)
orth_rotate_butterfly = self.cayley_batch(boft_R)
orth_rotate_butterfly = orth_rotate_butterfly.view(N, D, H, H)
orth_rotate_butterfly = dropout(orth_rotate_butterfly)
if self.fbd_cuda_available:
block_diagonal_butterfly = FastBlockDiag.apply(orth_rotate_butterfly)
else:
orth_rotate_butterfly = orth_rotate_butterfly.squeeze(0)
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
boft_P = self.boft_P.to(x)
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
butterfly_oft_mat = butterfly_oft_mat_batch[0]
for i in range(1, butterfly_oft_mat_batch.shape[0]):
butterfly_oft_mat = butterfly_oft_mat_batch[i] @ butterfly_oft_mat
boft_rotation = butterfly_oft_mat @ boft_rotation
boft_scale = boft_s * boft_scale
x = x.to(self.base_layer.weight.data.dtype)
orig_weight = self.base_layer.weight.data
orig_weight = orig_weight.view(
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
self.out_features,
)
rotated_weight = torch.mm(boft_rotation, orig_weight)
scaled_rotated_weight = rotated_weight * boft_scale
scaled_rotated_weight = scaled_rotated_weight.view(
self.out_features, self.in_features, self.base_layer.kernel_size[0], self.base_layer.kernel_size[0]
)
result = F.conv2d(
input=x,
weight=scaled_rotated_weight,
bias=self.base_layer.bias,
padding=self.base_layer.padding[0],
stride=self.base_layer.stride[0],
)
result = result.to(previous_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "boft." + rep
|
peft/src/peft/tuners/boft/layer.py/0
|
{
"file_path": "peft/src/peft/tuners/boft/layer.py",
"repo_id": "peft",
"token_count": 19628
}
| 192
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class LNTuningConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a :class:`~peft.tuners.LNTuningModel`.
Args:
target_modules (`Optional[Union[List[str], str]]`):
List of module names or regex expression of the module names to replace with LNTuning. For example,
'.*decoder.*' or '.*encoder.*'. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
modules_to_save (`Optional[Union[List[str], str]]`):
List of modules to be set as trainable and saved in the final checkpoint. For example, in Sequence
Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized
and as such need to be trainable and saved.
"""
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with LNTuning."
"For example, '.*decoder.*' or '.*encoder.*'. "
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you shoud specify the target modules manually."
),
},
)
modules_to_save: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of modules to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
self.peft_type = PeftType.LN_TUNING
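# Example usage (a minimal sketch; the model name and target module names below are illustrative
# assumptions, not prescribed by this file):
#
#     from transformers import AutoModelForCausalLM
#     from peft import LNTuningConfig, get_peft_model
#
#     base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
#     config = LNTuningConfig(task_type="CAUSAL_LM", target_modules=["final_layer_norm"])
#     peft_model = get_peft_model(base_model, config)
#     peft_model.print_trainable_parameters()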
|
peft/src/peft/tuners/ln_tuning/config.py/0
|
{
"file_path": "peft/src/peft/tuners/ln_tuning/config.py",
"repo_id": "peft",
"token_count": 950
}
| 193
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch import nn
from peft.utils.integrations import dequantize_module_weight, gather_params_ctx
from peft.utils.other import transpose
class DoraLinearLayer(nn.Module):
def __init__(self, fan_in_fan_out):
super().__init__()
self.fan_in_fan_out = fan_in_fan_out
def get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
# calculate L2 norm of weight matrix, column-wise
weight = transpose(weight, self.fan_in_fan_out)
weight = weight + scaling * lora_weight
weight_norm = torch.linalg.norm(weight, dim=1).to(weight.dtype)
return weight_norm
def update_layer(self, *, base_layer, lora_A, lora_B, scaling, place_on_cpu=False) -> None:
# temporarily convert fp16 to fp32, as fp16 can cause trouble on CPU with PyTorch < 2.2
dtype_is_fp16 = lora_A.dtype == torch.float16
if dtype_is_fp16:
lora_A = lora_A.float()
lora_B = lora_B.float()
with gather_params_ctx(base_layer.parameters()):
if base_layer.__class__.__name__ == "Linear4bit":
# We have to create a copy of the base layer, otherwise, FSDP will throw an error. 8bit does not work
# yet because Int8Params cannot be correctly deep-copied (attributes vanish)
base_layer = deepcopy(base_layer)
weight = dequantize_module_weight(base_layer)
if weight.data.ndim == 4: # For handling LoRAs applied to Conv2Ds.
lora_weight = torch.mm(lora_B.flatten(start_dim=1), lora_A.flatten(start_dim=1))
lora_weight = lora_weight.reshape(weight.shape)
else:
lora_weight = lora_B @ lora_A
if dtype_is_fp16:
lora_weight = lora_weight.half()
weight_norm = self.get_weight_norm(weight.to(lora_A.device), lora_weight, scaling)
if place_on_cpu:
weight_norm = weight_norm.to("cpu")
self.weight = nn.Parameter(weight_norm, requires_grad=True)
def forward(self, x, *, lora_A, lora_B, scaling, base_layer):
"""
For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
output.
"""
lora_result = lora_B(lora_A(x))
# Don't use `lora_weight = lora_B.weight @ lora_A.weight` because this causes errors with FSDP. Instead,
# calculate the same but using forward.
x_eye = torch.eye(lora_A.weight.shape[1], device=lora_A.weight.device, dtype=x.dtype)
lora_weight = lora_B(lora_A(x_eye)).T
magnitude = self.weight
weight = dequantize_module_weight(base_layer)
weight = weight.to(x.dtype)
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
# see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
# "[...] we suggest treating ||V +∆V ||_c in
# Eq. (5) as a constant, thereby detaching it from the gradient
# graph. This means that while ||V + ∆V ||_c dynamically
# reflects the updates of ∆V , it won’t receive any gradient
# during backpropagation"
weight_norm = weight_norm.detach()
mag_norm_scale = (magnitude / weight_norm).view(1, -1)
result_dora = (mag_norm_scale - 1) * (
F.linear(x, transpose(weight, self.fan_in_fan_out))
) + mag_norm_scale * lora_result * scaling
# Note: Computation could potentially be accelerated by using the code below instead of calculating X@W again.
# This is only correct if dropout=0, otherwise results will differ:
# https://github.com/huggingface/peft/pull/1474#issuecomment-1964682771
# bias = self.get_base_layer().bias
# if bias is not None:
# result = result - bias
# result = mag_norm_scale * result + mag_norm_scale * lora_B(lora_A(x)) * scaling
# if bias is not None:
# result = result + bias
return result_dora
def __repr__(self) -> str:
rep = super().__repr__()
return "lora.dora." + rep
class DoraEmbeddingLayer(DoraLinearLayer):
def forward(self, x, *, lora_A, lora_B, scaling, base_layer, embed_fn):
"""
For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
output.
"""
lora_weight = (lora_A @ lora_B).T
magnitude = self.weight
weight = base_layer.weight
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
# see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
# "[...] we suggest treating ||V +∆V ||_c in
# Eq. (5) as a constant, thereby detaching it from the gradient
# graph. This means that while ||V + ∆V ||_c dynamically
# reflects the updates of ∆V , it won’t receive any gradient
# during backpropagation"
weight_norm = weight_norm.detach()
mag_norm_scale = magnitude / weight_norm
result_dora = mag_norm_scale * (embed_fn(x, lora_A) @ lora_B) * scaling
return mag_norm_scale, result_dora
def __repr__(self) -> str:
rep = super().__repr__()
return "lora.dora." + rep
class DoraConv2dLayer(DoraLinearLayer):
def get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor:
# calculate L2 norm of weight matrix, column-wise
weight = weight + scaling * lora_weight
# the following is needed to have compatibility with the 4D weight tensors of Conv2D
weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0)
return weight_norm
def forward(self, x, *, lora_A, lora_B, scaling, base_layer):
"""
For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer
output.
"""
weight = base_layer.weight
lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1))
lora_weight = lora_weight.reshape(weight.shape)
magnitude = self.weight
weight_norm = self.get_weight_norm(weight, lora_weight.detach(), scaling)
# see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353)
# "[...] we suggest treating ||V +∆V ||_c in
# Eq. (5) as a constant, thereby detaching it from the gradient
# graph. This means that while ||V + ∆V ||_c dynamically
# reflects the updates of ∆V , it won’t receive any gradient
# during backpropagation"
weight_norm = weight_norm.detach()
mag_norm_scale = magnitude / weight_norm
result_dora = (mag_norm_scale - 1) * (
F.conv2d(
x,
weight,
bias=None,
stride=base_layer.stride,
padding=base_layer.padding,
dilation=base_layer.dilation,
groups=base_layer.groups,
)
) + mag_norm_scale * lora_B(lora_A(x)) * scaling
return result_dora
def __repr__(self) -> str:
rep = super().__repr__()
return "lora.dora." + rep
|
peft/src/peft/tuners/lora/dora.py/0
|
{
"file_path": "peft/src/peft/tuners/lora/dora.py",
"repo_id": "peft",
"token_count": 3356
}
| 194
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Dict, Type, Union
import torch
from torch import nn
from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner
from .layer import Conv2d, Linear, OFTLayer
class OFTModel(LycorisTuner):
"""
Creates Orthogonal Finetuning model from a pretrained model. The method is described in
https://arxiv.org/abs/2306.07280
Args:
model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
config ([`OFTConfig`]): The configuration of the OFT model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The OFT model.
Example:
```py
>>> from diffusers import StableDiffusionPipeline
>>> from peft import OFTModel, OFTConfig
>>> config_te = OFTConfig(
... r=8,
... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
... module_dropout=0.0,
... init_weights=True,
... )
>>> config_unet = OFTConfig(
... r=8,
... target_modules=[
... "proj_in",
... "proj_out",
... "to_k",
... "to_q",
... "to_v",
... "to_out.0",
... "ff.net.0.proj",
... "ff.net.2",
... ],
... module_dropout=0.0,
... init_weights=True,
... )
>>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> model.text_encoder = OFTModel(model.text_encoder, config_te, "default")
>>> model.unet = OFTModel(model.unet, config_unet, "default")
```
**Attributes**:
- **model** ([`~torch.nn.Module`]) -- The model to be adapted.
- **peft_config** ([`OFTConfig`]): The configuration of the OFT model.
"""
prefix: str = "oft_"
layers_mapping: Dict[Type[torch.nn.Module], Type[OFTLayer]] = {
torch.nn.Conv2d: Conv2d,
torch.nn.Linear: Linear,
}
def _create_and_replace(
self,
config: LycorisConfig,
adapter_name: str,
target: Union[OFTLayer, nn.Module],
target_name: str,
parent: nn.Module,
current_key: str,
) -> None:
"""
A private method to create and replace the target module with the adapter module.
"""
# Regexp matching - Find key which matches current target_name in patterns provided
pattern_keys = list(config.rank_pattern.keys())
target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name)
kwargs = config.to_dict()
kwargs["r"] = config.rank_pattern.get(target_name_key, config.r)
if isinstance(target, OFTLayer):
target.update_layer(adapter_name, **kwargs)
else:
new_module = self._create_new_module(config, adapter_name, target, **kwargs)
self._replace_module(parent, target_name, new_module, target)
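# Illustrative sketch (an assumption, not part of the original file): the rank_pattern lookup in
# `_create_and_replace` matches the current module key against the user-provided pattern keys with a
# suffix-anchored regex and falls back to the default rank when nothing matches, e.g.
# _rank_pattern_lookup_sketch("model.blocks.0.to_q", {"to_q": 16}, 8) -> 16.
def _rank_pattern_lookup_sketch(current_key: str, rank_pattern: dict, default_r: int) -> int:
    pattern_keys = list(rank_pattern.keys())
    matched = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), None)
    return rank_pattern[matched] if matched is not None else default_r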
|
peft/src/peft/tuners/oft/model.py/0
|
{
"file_path": "peft/src/peft/tuners/oft/model.py",
"repo_id": "peft",
"token_count": 1600
}
| 195
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import warnings
from typing import Optional
import huggingface_hub
import torch
from huggingface_hub import file_exists, hf_hub_download
from huggingface_hub.utils import EntryNotFoundError, LocalEntryNotFoundError
from packaging import version
from safetensors.torch import load_file as safe_load_file
from .other import (
EMBEDDING_LAYER_NAMES,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
check_file_exists_on_hf_hub,
infer_device,
)
from .peft_types import PeftType
def has_valid_embedding_base_layer(layer):
"""Check if the layer has an embedding base layer"""
return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding))
def get_embedding_layer_name(model, layer, is_embedding_in_target_modules):
"""Get the name of the embedding module for a given layer."""
for name, module in model.named_modules():
if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None):
return name
return None
def get_peft_model_state_dict(
model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto"
):
"""
Get the state dict of the Peft model.
Args:
model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,
the model should be the underlying model/unwrapped model (i.e. model.module).
state_dict (`dict`, *optional*, defaults to `None`):
The state dict of the model. If not provided, the state dict of the passed model will be used.
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter whose state dict should be returned.
unwrap_compiled (`bool`, *optional*, defaults to `False`):
Whether to unwrap the model if torch.compile was used.
save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `auto`):
If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding
layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in the config's `target_modules` when available and sets the
boolean flag accordingly. This only works for 🤗 transformers models.
"""
if unwrap_compiled:
model = getattr(model, "_orig_mod", model)
config = model.peft_config[adapter_name]
if state_dict is None:
state_dict = model.state_dict()
# TUNER SPECIFIC CODE
if config.peft_type in (PeftType.LORA, PeftType.ADALORA):
# to_return = lora_state_dict(model, bias=model.peft_config.bias)
# adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`
# to be used directly with the state dict which is necessary when using DeepSpeed or FSDP
bias = config.bias
if bias == "none":
to_return = {k: state_dict[k] for k in state_dict if "lora_" in k}
elif bias == "all":
to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
for k in state_dict:
if "lora_" in k:
to_return[k] = state_dict[k]
bias_name = k.split("lora_")[0] + "bias"
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))}
if config.peft_type == PeftType.ADALORA:
rank_pattern = config.rank_pattern
if rank_pattern is not None:
rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()}
config.rank_pattern = rank_pattern
to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)
if config.use_dora:
# Here we take care of a refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a
# ModuleDict with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer. Since
# we want the state_dict format not to change, we remove the "weight" part.
new_dora_suffix = f"lora_magnitude_vector.{adapter_name}.weight"
def renamed_dora_weights(k):
if k.endswith(new_dora_suffix):
k = k[:-7] # remove ".weight"
return k
to_return = {renamed_dora_weights(k): v for k, v in to_return.items()}
elif config.peft_type == PeftType.BOFT:
bias = config.bias
if bias == "none":
to_return = {k: state_dict[k] for k in state_dict if "boft_" in k}
elif bias == "all":
to_return = {k: state_dict[k] for k in state_dict if "boft_" in k or "bias" in k}
elif bias == "boft_only":
to_return = {}
for k in state_dict:
if "boft_" in k:
to_return[k] = state_dict[k]
bias_name = k.split("boft_")[0] + "bias"
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
elif config.peft_type == PeftType.LOHA:
to_return = {k: state_dict[k] for k in state_dict if "hada_" in k}
elif config.peft_type == PeftType.LOKR:
to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k}
elif config.peft_type == PeftType.ADAPTION_PROMPT:
to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")}
elif config.is_prompt_learning:
to_return = {}
if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols
to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows
prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
else:
if config.inference_mode:
prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
else:
prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)
to_return["prompt_embeddings"] = prompt_embeddings
elif config.peft_type == PeftType.IA3:
to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k}
elif config.peft_type == PeftType.OFT:
to_return = {k: state_dict[k] for k in state_dict if "oft_" in k}
elif config.peft_type == PeftType.POLY:
to_return = {k: state_dict[k] for k in state_dict if "poly_" in k}
elif config.peft_type == PeftType.LN_TUNING:
to_return = {k: state_dict[k] for k in state_dict if "ln_tuning_" in k}
elif config.peft_type == PeftType.VERA:
to_return = {k: state_dict[k] for k in state_dict if "vera_lambda_" in k}
if config.save_projection:
# TODO: adding vera_A and vera_B to `self.get_base_layer` would
# make the name matching here difficult to predict.
if f"base_model.vera_A.{adapter_name}" not in state_dict:
raise ValueError(
"Model was initialised to not save vera_A and vera_B but config now specifies to save projection!"
" Set `config.save_projection` to `False`."
)
to_return["base_model.vera_A." + adapter_name] = state_dict["base_model.vera_A." + adapter_name]
to_return["base_model.vera_B." + adapter_name] = state_dict["base_model.vera_B." + adapter_name]
elif config.peft_type == PeftType.FOURIERFT:
to_return = {k: state_dict[k] for k in state_dict if "fourierft_" in k}
elif config.peft_type == PeftType.XLORA:
to_return = {k: state_dict[k] for k in state_dict if "internal_xlora_classifier" in k}
elif config.peft_type == PeftType.HRA:
to_return = {k: state_dict[k] for k in state_dict if "hra_" in k}
else:
raise ValueError(f"Unknown PEFT type passed: {config.peft_type}")
# MODULES TO SAVE
if getattr(model, "modules_to_save", None) is not None:
for key, value in state_dict.items():
if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save):
to_return[key.replace("modules_to_save.", "")] = value
# DEAL WITH EMBEDDINGS
# check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary
is_embedding_in_target_modules = False
if (
save_embedding_layers == "auto"
and hasattr(config, "target_modules")
and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES)
):
warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.")
save_embedding_layers = is_embedding_in_target_modules = True
elif save_embedding_layers == "auto":
vocab_size = getattr(getattr(model, "config", None), "vocab_size", None)
model_id = getattr(config, "base_model_name_or_path", None)
# For some models e.g. diffusers the text config file is stored in a subfolder
# we need to make sure we can download that config.
has_base_config = False
# ensure that this check is not performed in HF offline mode, see #1452
if model_id is not None:
local_config_exists = os.path.exists(os.path.join(model_id, "config.json"))
exists = local_config_exists or check_file_exists_on_hf_hub(model_id, "config.json")
if exists is None:
# check failed, could not determine if it exists or not
warnings.warn(
f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified."
)
has_base_config = False
else:
has_base_config = exists
# check if the vocab size of the base model is different from the vocab size of the finetuned model
if (
vocab_size
and model_id
and has_base_config
and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size)
):
warnings.warn(
"Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning."
)
save_embedding_layers = True
else:
save_embedding_layers = False
if save_embedding_layers and hasattr(model, "get_input_embeddings"):
for layer in [model.get_input_embeddings(), model.get_output_embeddings()]:
if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer):
# support from version >= 0.6.2
embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules)
if embedding_module_name:
to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k})
elif save_embedding_layers:
warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.")
# REMOVE ADAPTER NAME
to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()}
return to_return
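# Example usage (a minimal sketch; the base model and target modules are illustrative assumptions):
#
#     from peft import LoraConfig, get_peft_model
#
#     peft_model = get_peft_model(base_model, LoraConfig(target_modules=["q_proj", "v_proj"]))
#     adapter_state = get_peft_model_state_dict(peft_model, adapter_name="default")
#     # keys look like "base_model.model...lora_A.weight", with the adapter name stripped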
def _find_mismatched_keys(
model: torch.nn.Module, peft_model_state_dict: dict[str, torch.Tensor], ignore_mismatched_sizes: bool = False
) -> tuple[dict[str, torch.Tensor], list[tuple[str, tuple[int, ...], tuple[int, ...]]]]:
if not ignore_mismatched_sizes:
return peft_model_state_dict, []
mismatched = []
state_dict = model.state_dict()
for key, tensor in peft_model_state_dict.items():
if key not in state_dict:
continue
# see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L3858-L3864
if (state_dict[key].shape[-1] == 1) and (state_dict[key].numel() * 2 == tensor.numel()):
# This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size
# differences. Without matching on module or parameter type, this seems like a practical way to detect
# valid 4-bit weights.
continue
if state_dict[key].shape != tensor.shape:
mismatched.append((key, tensor.shape, state_dict[key].shape))
for key, _, _ in mismatched:
del peft_model_state_dict[key]
return peft_model_state_dict, mismatched
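# Hypothetical illustration (not part of the original file): two 4-bit values share one 8-bit container,
# so a bitsandbytes 4-bit weight with logical shape (n, m) is stored as a packed tensor of n * m / 2
# elements with a trailing dimension of 1. That is the shape signature the heuristic above looks for
# before skipping the size comparison.
def _looks_like_packed_4bit(stored: torch.Tensor, checkpoint_tensor: torch.Tensor) -> bool:
    return stored.shape[-1] == 1 and stored.numel() * 2 == checkpoint_tensor.numel()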
def set_peft_model_state_dict(
model, peft_model_state_dict, adapter_name="default", ignore_mismatched_sizes: bool = False
):
"""
Set the state dict of the Peft model.
Args:
model ([`PeftModel`]):
The Peft model.
peft_model_state_dict (`dict`):
The state dict of the Peft model.
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter whose state dict should be set.
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
Whether to ignore mismatched sizes in the state dict.
"""
config = model.peft_config[adapter_name]
state_dict = {}
if getattr(model, "modules_to_save", None) is not None:
for key, value in peft_model_state_dict.items():
if any(module_name in key for module_name in model.modules_to_save):
for module_name in model.modules_to_save:
if module_name in key:
key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}")
break
state_dict[key] = value
else:
state_dict = peft_model_state_dict
if config.peft_type in (
PeftType.LORA,
PeftType.LOHA,
PeftType.LOKR,
PeftType.ADALORA,
PeftType.IA3,
PeftType.OFT,
PeftType.POLY,
PeftType.LN_TUNING,
PeftType.BOFT,
PeftType.VERA,
PeftType.FOURIERFT,
PeftType.HRA,
):
peft_model_state_dict = {}
parameter_prefix = {
PeftType.IA3: "ia3_",
PeftType.LORA: "lora_",
PeftType.ADALORA: "lora_",
PeftType.LOHA: "hada_",
PeftType.LOKR: "lokr_",
PeftType.OFT: "oft_",
PeftType.POLY: "poly_",
PeftType.BOFT: "boft_",
PeftType.LN_TUNING: "ln_tuning_",
PeftType.VERA: "vera_lambda_",
PeftType.FOURIERFT: "fourierft_",
PeftType.HRA: "hra_",
}[config.peft_type]
for k, v in state_dict.items():
if parameter_prefix in k:
suffix = k.split(parameter_prefix)[1]
if "." in suffix:
suffix_to_replace = ".".join(suffix.split(".")[1:])
k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}")
else:
k = f"{k}.{adapter_name}"
peft_model_state_dict[k] = v
else:
peft_model_state_dict[k] = v
if config.peft_type == PeftType.ADALORA:
rank_pattern = config.rank_pattern
if rank_pattern is not None:
model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)
elif config.peft_type == PeftType.VERA:
if config.save_projection and "base_model.vera_A" not in peft_model_state_dict:
raise ValueError(
"Specified to load vera_A and vera_B from state dictionary however they were not present!"
)
elif not config.save_projection and "base_model.vera_A" in peft_model_state_dict:
warnings.warn(
"Specified to not load vera_A and vera_B from state dictionary however they are present in state"
" dictionary! Consider using them to ensure checkpoint loading is correct on all platforms using"
" `peft_config.save_projection = True`"
)
elif not config.save_projection: # and no vera_A in state dictionary
warnings.warn(
"Specified to not load vera_A and vera_B from state dictionary. This means we will be relying on"
" PRNG initialisation to restore these projections using `config.projection_prng_key`, which may"
" not be accurate on all system configurations."
)
elif config.peft_type == PeftType.LORA:
# Here we take care of a refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a
# ModuleDict with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer.
old_dora_suffix = f"lora_magnitude_vector.{adapter_name}"
def renamed_dora_weights(k):
if k.endswith(old_dora_suffix):
k = k + ".weight"
return k
peft_model_state_dict = {renamed_dora_weights(k): v for k, v in peft_model_state_dict.items()}
elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT:
peft_model_state_dict = state_dict
elif config.peft_type == PeftType.XLORA:
peft_model_state_dict = state_dict
else:
raise NotImplementedError
peft_model_state_dict, mismatched_keys = _find_mismatched_keys(
model, peft_model_state_dict, ignore_mismatched_sizes=ignore_mismatched_sizes
)
load_result = model.load_state_dict(peft_model_state_dict, strict=False)
if config.is_prompt_learning:
model.prompt_encoder[adapter_name].embedding.load_state_dict(
{"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True
)
if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False)
if mismatched_keys:
# see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L4039
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
msg = (
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint "
f"and are being ignored because you passed `ignore_mismatched_sizes=True`: {mismatched_warning}."
)
warnings.warn(msg)
return load_result
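# Minimal sketch (an assumption, not part of the original file): when loading, the adapter name is
# spliced back into each parameter key right after the tuner prefix, reversing the "REMOVE ADAPTER NAME"
# step performed on save, e.g.
# _reinsert_adapter_name_sketch("model.q_proj.lora_A.weight", "lora_", "default")
# -> "model.q_proj.lora_A.default.weight".
def _reinsert_adapter_name_sketch(key: str, parameter_prefix: str, adapter_name: str) -> str:
    suffix = key.split(parameter_prefix)[1]
    if "." in suffix:
        suffix_to_replace = ".".join(suffix.split(".")[1:])
        return key.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}")
    return f"{key}.{adapter_name}"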
def torch_load(*args, weights_only=True, **kwargs):
"""Call torch.load and handle weights_only.
Defaults to weights_only=True to anticipate the upcoming change of the default on the PyTorch side.
"""
# TODO: weights_only was added in 1.13, remove if 1.12 no longer needs to be supported
if version.parse(torch.__version__) < version.parse("1.13"):
return torch.load(*args, **kwargs)
return torch.load(*args, weights_only=weights_only, **kwargs)
def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict:
r"""
A helper method to load the PEFT weights from the HuggingFace Hub or locally
Args:
model_id (`str`):
The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub.
device (`str`):
The device to load the weights onto.
hf_hub_download_kwargs (`dict`):
Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub.
"""
path = (
os.path.join(model_id, hf_hub_download_kwargs["subfolder"])
if hf_hub_download_kwargs.get("subfolder", None) is not None
else model_id
)
if device is None:
device = infer_device()
def get_hub_filename(use_safetensors=True):
weights_name = SAFETENSORS_WEIGHTS_NAME if use_safetensors else WEIGHTS_NAME
return (
os.path.join(hf_hub_download_kwargs["subfolder"], weights_name)
if hf_hub_download_kwargs.get("subfolder", None) is not None
else weights_name
)
if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)):
filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME)
use_safetensors = True
elif os.path.exists(os.path.join(path, WEIGHTS_NAME)):
filename = os.path.join(path, WEIGHTS_NAME)
use_safetensors = False
elif huggingface_hub.constants.HF_HUB_OFFLINE:
# if in offline mode, check if we can find the adapter file locally
hub_filename = get_hub_filename(use_safetensors=True)
try:
filename = hf_hub_download(model_id, hub_filename, local_files_only=True)
use_safetensors = True
except LocalEntryNotFoundError:
# Could not find safetensors, try pickle. If this also fails, it's fine to let the error be raised here, as
# it means that the user tried to load a non-cached model in offline mode.
hub_filename = get_hub_filename(use_safetensors=False)
filename = hf_hub_download(model_id, hub_filename, local_files_only=True)
use_safetensors = False
else:
token = hf_hub_download_kwargs.get("token", None)
if token is None:
token = hf_hub_download_kwargs.get("use_auth_token", None)
hub_filename = get_hub_filename(use_safetensors=True)
has_remote_safetensors_file = file_exists(
repo_id=model_id,
filename=hub_filename,
revision=hf_hub_download_kwargs.get("revision", None),
repo_type=hf_hub_download_kwargs.get("repo_type", None),
token=token,
)
use_safetensors = has_remote_safetensors_file
if has_remote_safetensors_file:
# Priority 1: load safetensors weights
filename = hf_hub_download(
model_id,
SAFETENSORS_WEIGHTS_NAME,
**hf_hub_download_kwargs,
)
else:
try:
filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs)
except EntryNotFoundError:
raise ValueError(
f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}."
)
if use_safetensors:
if hasattr(torch.backends, "mps") and (device == torch.device("mps")):
adapters_weights = safe_load_file(filename, device="cpu")
else:
adapters_weights = safe_load_file(filename, device=device)
else:
adapters_weights = torch_load(filename, map_location=torch.device(device))
return adapters_weights
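# Example usage (a minimal sketch; the adapter repository id below is an illustrative assumption):
#
#     adapters_weights = load_peft_weights("some-user/opt-350m-lora", device="cpu")
#     print(list(adapters_weights.keys())[:3])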
|
peft/src/peft/utils/save_and_load.py/0
|
{
"file_path": "peft/src/peft/utils/save_and_load.py",
"repo_id": "peft",
"token_count": 10762
}
| 196
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from contextlib import contextmanager
from copy import deepcopy
from unittest.mock import patch
import pytest
import torch
from huggingface_hub.utils import reset_sessions
from scipy import stats
from torch import nn
from transformers import AutoModelForCausalLM
from peft import (
AdaLoraConfig,
LoraConfig,
PeftMixedModel,
PeftModel,
PeftModelForCausalLM,
PeftModelForFeatureExtraction,
PeftModelForQuestionAnswering,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
PromptTuningConfig,
VeraConfig,
get_peft_model,
)
from peft.utils import infer_device
class TestLoraInitialization:
"""Test class to check the initialization of LoRA adapters."""
torch_device = infer_device()
def get_uniform(self, amin, amax, size=(10000,)):
unif = torch.distributions.uniform.Uniform(amin, amax)
samples = unif.sample(size)
return samples
def get_normal(self, mean, std, size=(10000,)):
normal = torch.distributions.normal.Normal(mean, std)
samples = normal.sample(size)
return samples
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
self.embed = nn.Embedding(1000, 1000)
self.conv2d = nn.Conv2d(100, 100, 3)
def forward(self, x):
x_int = (100 * x).int()
x_4d = x.flatten().reshape(1, 100, 10, 10)
return self.linear(x), self.embed(x_int), self.conv2d(x_4d)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
return torch.rand(10, 1000).to(self.torch_device)
def test_lora_linear_init_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"])
model = get_peft_model(model, config)
weight_A = model.linear.lora_A["default"].weight
weight_B = model.linear.lora_B["default"].weight
# use statistical test to check if weight A is from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a normal distribution
normal = self.get_normal(weight_A.mean().item(), weight_A.std().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_linear_init_gaussian(self):
# use gaussian init
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.linear.lora_A["default"].weight
weight_B = model.linear.lora_B["default"].weight
# use statistical test to check if weight A is from a normal distribution
normal = self.get_normal(0.0, 1 / config.r)
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_linear_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["linear"], init_lora_weights=False)
model = get_peft_model(model, config)
weight_B = model.linear.lora_B["default"].weight
# with init_lora_weights=False, weight B should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_B, torch.zeros_like(weight_B))
def test_lora_embedding_default(self):
# embedding is initialized as a normal distribution, not kaiming uniform
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"])
model = get_peft_model(model, config)
weight_A = model.embed.lora_embedding_A["default"]
weight_B = model.embed.lora_embedding_B["default"]
# use statistical test to check if weight B is from a normal distribution
normal = self.get_normal(0.0, 1.0)
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight B is *not* from a uniform distribution
unif = self.get_uniform(weight_B.min().item(), weight_B.max().item())
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight A is zero
assert (weight_A == 0.0).all()
def test_lora_embedding_gaussian(self):
# embedding does not change with init_lora_weights="gaussian" vs True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.embed.lora_embedding_A["default"]
weight_B = model.embed.lora_embedding_B["default"]
# use statistical test to check if weight B is from a normal distribution
normal = self.get_normal(0.0, 1.0)
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight B is *not* from a uniform distribution
unif = self.get_uniform(weight_B.min().item(), weight_B.max().item())
_, p_value = stats.kstest(weight_B.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight A is zero
assert (weight_A == 0.0).all()
def test_lora_embedding_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["embed"], init_lora_weights=False)
model = get_peft_model(model, config)
weight_A = model.embed.lora_embedding_B["default"]
# with init_lora_weights=False, weight A should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_A, torch.zeros_like(weight_A))
def test_lora_conv2d_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"])
model = get_peft_model(model, config)
weight_A = model.conv2d.lora_A["default"].weight
weight_B = model.conv2d.lora_B["default"].weight
# use statistical test to check if weight A is from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a normal distribution
normal = self.get_normal(weight_A.mean().item(), weight_A.std().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_conv2d_init_gaussian(self):
# use gaussian init
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"], init_lora_weights="gaussian")
model = get_peft_model(model, config)
weight_A = model.conv2d.lora_A["default"].weight
weight_B = model.conv2d.lora_B["default"].weight
# use statistical test to check if weight A is from a normal distribution
normal = self.get_normal(0.0, 1 / config.r)
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), normal.flatten().cpu().numpy())
assert p_value > 0.5
# check that weight A is *not* from a uniform distribution
unif = self.get_uniform(weight_A.min().item(), weight_A.max().item())
_, p_value = stats.kstest(weight_A.detach().flatten().cpu().numpy(), unif.flatten().cpu().numpy())
assert p_value < 0.05
# check that weight B is zero
assert (weight_B == 0.0).all()
def test_lora_conv2d_false(self):
torch.manual_seed(0)
model = self.get_model()
config = LoraConfig(target_modules=["conv2d"], init_lora_weights=False)
model = get_peft_model(model, config)
weight_B = model.conv2d.lora_B["default"].weight
# with init_lora_weights=False, weight B should *not* be zero. We don't care so much about the actual values
# as long as they are not zero, in order to avoid identity transformation.
assert not torch.allclose(weight_B, torch.zeros_like(weight_B))
def test_lora_scaling_default(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=False
config = LoraConfig(target_modules=["linear", "embed", "conv2d"], lora_alpha=3, r=16, use_rslora=False)
model = get_peft_model(model, config)
expected_scaling = config.lora_alpha / config.r
assert model.linear.scaling["default"] == expected_scaling
assert model.embed.scaling["default"] == expected_scaling
assert model.conv2d.scaling["default"] == expected_scaling
def test_lora_pissa_linear_init_default(self, data):
model = self.get_model()
output = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
config = LoraConfig(init_lora_weights="pissa_niter_16", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
def test_lora_olora_linear_init_default(self, data):
model = self.get_model()
output = model(data)[0]
# Both OLoRA and olora should work
config = LoraConfig(init_lora_weights="OLoRA", target_modules=["linear"])
peft_model = get_peft_model(deepcopy(model), config)
assert torch.allclose(output, peft_model(data)[0], atol=1e-06)
def test_lora_pissa_conversion_same_output_after_loading(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_config_keys_before = list(peft_model.peft_config.keys())
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
peft_config_keys_after = list(peft_model.peft_config.keys())
assert peft_config_keys_before == peft_config_keys_after
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_rank_pattern(self, data, tmp_path):
# same as above, but using rank_pattern
model = self.get_model()
output_base = model(data)[0]
# use rank_pattern here; note that since there is only a single linear layer, r is completely overridden
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8, rank_pattern={"linear": 32})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 32
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 64
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_alpha_pattern(self, data, tmp_path):
# same as above, but using alpha_pattern
model = self.get_model()
output_base = model(data)[0]
# use alpha_pattern here; note that since there is only a single linear layer, lora_alpha is completely
# overridden
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], alpha_pattern={"linear": 5})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 5 / 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
assert model_converted.base_model.model.linear.scaling["default"] == 10 / 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_lora_pissa_conversion_same_output_after_loading_with_rslora(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8, use_rslora=True)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_pissa, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "pissa-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_pissa, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 8 / (8**0.5)
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# same scale as before with a little bit of floating point imprecision
assert model_converted.base_model.model.linear.scaling["default"] == pytest.approx(8 / (8**0.5))
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_pissa_rank_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="pissa", target_modules=["linear"], r=8, rank_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
def test_pissa_alpha_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="pissa", target_modules=["linear"], r=8, alpha_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
# TODO: remove test for deprecated arg in PEFT v0.14.0
def test_lora_pissa_conversion_same_output_after_loading_with_deprecated_arg(self, data, tmp_path):
model = self.get_model()
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"
tol = 1e-06
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_pissa = peft_model(data)[0]
peft_model.save_pretrained(tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model")
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# TODO: remove test for deprecated warning in PEFT v0.14.0
def test_lora_pissa_conversion_deprecated_warning(self, data, tmp_path):
model = self.get_model()
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(tmp_path / "init-model")
warning_message = "`convert_pissa_to_lora` is deprecated and will be removed in a future version. Use `path_initial_model_for_weight_conversion` instead."
# Test the warning
with pytest.warns(UserWarning, match=warning_message):
peft_model.save_pretrained(
tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model"
)
def test_olora_conversion_same_output_after_loading(self, data, tmp_path):
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_config_keys_before = list(peft_model.peft_config.keys())
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
peft_config_keys_after = list(peft_model.peft_config.keys())
assert peft_config_keys_before == peft_config_keys_after
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_rank_pattern(self, data, tmp_path):
# same as above, but using rank_pattern
model = self.get_model()
output_base = model(data)[0]
# use rank_pattern here; note that since there is only a single linear layer, r is completely overridden
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8, rank_pattern={"linear": 32})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 32
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 64
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_alpha_pattern(self, data, tmp_path):
# same as above, but using alpha_pattern
model = self.get_model()
output_base = model(data)[0]
# use alpha_pattern here; note that since there is only a single linear layer, lora_alpha is completely
# overridden
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], alpha_pattern={"linear": 5})
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 5 / 8
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
assert model_converted.base_model.model.linear.scaling["default"] == 10 / 16
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_conversion_same_output_after_loading_with_rslora(self, data, tmp_path):
# same as above, but with use_rslora=True
model = self.get_model()
output_base = model(data)[0]
config = LoraConfig(init_lora_weights="olora", target_modules=["linear"], r=8, use_rslora=True)
peft_model = get_peft_model(deepcopy(model), config)
# save the initial model
peft_model.save_pretrained(tmp_path / "init-model")
# modify the weights, or else the adapter performs an identity transformation
peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
output_olora = peft_model(data)[0]
# sanity check
tol = 1e-06
assert not torch.allclose(output_base, output_olora, atol=tol, rtol=tol)
# save the model normally
peft_model.save_pretrained(tmp_path / "olora-model")
model_loaded = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model")
output_loaded = model_loaded(data)[0]
assert torch.allclose(output_olora, output_loaded, atol=tol, rtol=tol)
# sanity check: ranks should still be 8 as initially
assert model_loaded.peft_config["default"].r == 8
assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8
assert model_loaded.base_model.model.linear.scaling["default"] == 8 / (8**0.5)
# sanity check: the base model weights were indeed changed
assert not torch.allclose(
model.linear.weight, model_loaded.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
# save the model with conversion
peft_model.save_pretrained(
tmp_path / "olora-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "olora-model-converted")
output_converted = model_converted(data)[0]
assert torch.allclose(output_olora, output_converted, atol=tol, rtol=tol)
# rank should be double of what it was initially
assert model_converted.peft_config["default"].r == 16
assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
# same scale as before with a little bit of floating point imprecision
assert model_converted.base_model.model.linear.scaling["default"] == pytest.approx(8 / (8**0.5))
# base model weights should be the same as the initial model
assert torch.allclose(
model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
)
def test_olora_rank_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="olora", target_modules=["linear"], r=8, rank_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "olora-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
def test_olora_alpha_pattern_and_rslora_raises(self, tmp_path):
# it's not possible to determine the correct scale when using rslora with rank or alpha pattern, because the
# scale is not stored in the state_dict
model = self.get_model()
config = LoraConfig(
init_lora_weights="olora", target_modules=["linear"], r=8, alpha_pattern={"linear": 2}, use_rslora=True
)
peft_model = get_peft_model(model, config)
peft_model.save_pretrained(tmp_path / "init-model")
msg = re.escape("Passing `path_initial_model_for_weight_conversion` to `save_pretrained`")
with pytest.raises(ValueError, match=msg):
peft_model.save_pretrained(
tmp_path / "olora-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
)
@pytest.mark.parametrize(
"config_kwargs, should_warn",
[
# no warning
({"init_lora_weights": "pissa", "target_modules": ["linear"]}, False),
({"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"]}, False),
({"init_lora_weights": "olora", "target_modules": ["linear"]}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "olora", "target_modules": ["linear"], "use_rslora": True}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "rank_pattern": {"linear": 8}}, False),
(
{"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "rank_pattern": {"linear": 8}},
False,
),
({"init_lora_weights": "olora", "target_modules": ["linear"], "rank_pattern": {"linear": 8}}, False),
({"init_lora_weights": "pissa", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}}, False),
(
{"init_lora_weights": "pissa_niter_3", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}},
False,
),
({"init_lora_weights": "olora", "target_modules": ["linear"], "alpha_pattern": {"linear": 8}}, False),
# warning
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "pissa_niter_3",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
(
{
"init_lora_weights": "olora",
"target_modules": ["linear"],
"use_rslora": True,
"rank_pattern": {"linear": 8},
"alpha_pattern": {"linear": 8},
},
True,
),
],
)
def test_lora_config_pissa_olora_warns(self, config_kwargs, should_warn, recwarn):
# Using post training conversion of modified base weights to restore their initial values (PiSSA, OLoRA) cannot
# be correctly done when using rslora + rank_pattern/alpha_pattern. We can't really know if the user intends
# this when they'll eventually call save_pretrained (i.e. if they'll pass
# path_initial_model_for_weight_conversion). Therefore, we only warn but don't raise an error here.
msg = re.escape("Using Rank-Stabilized LoRA with rank_pattern/alpha_pattern and post-training conversion")
if should_warn:
LoraConfig(**config_kwargs)
assert len(recwarn.list) == 1
with pytest.warns(UserWarning, match=msg):
LoraConfig(**config_kwargs)
else:
LoraConfig(**config_kwargs)
assert not recwarn.list
def test_lora_rslora_scaling(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=True
config = LoraConfig(target_modules=["linear", "embed", "conv2d"], lora_alpha=3, r=16, use_rslora=True)
model = get_peft_model(model, config)
expected_scaling = config.lora_alpha / (config.r**0.5)
assert model.linear.scaling["default"] == expected_scaling
assert model.embed.scaling["default"] == expected_scaling
assert model.conv2d.scaling["default"] == expected_scaling
def test_lora_default_scaling_pattern(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=False with rank and alpha pattern
config = LoraConfig(
target_modules=["linear", "embed", "conv2d"],
rank_pattern={"embed": 9, "conv2d": 16},
alpha_pattern={"linear": 11, "conv2d": 13},
lora_alpha=17,
r=25,
use_rslora=False,
)
model = get_peft_model(model, config)
expected_scaling = {
"linear": config.alpha_pattern["linear"] / config.r,
"embed": config.lora_alpha / config.rank_pattern["embed"],
"conv2d": config.alpha_pattern["conv2d"] / config.rank_pattern["conv2d"],
}
assert model.linear.scaling["default"] == expected_scaling["linear"]
assert model.embed.scaling["default"] == expected_scaling["embed"]
assert model.conv2d.scaling["default"] == expected_scaling["conv2d"]
def test_lora_rslora_scaling_pattern(self):
# default is True
torch.manual_seed(0)
model = self.get_model()
# check scaling factor use_rslora=True with rank and alpha pattern
config = LoraConfig(
target_modules=["linear", "embed", "conv2d"],
rank_pattern={"embed": 9, "conv2d": 16},
alpha_pattern={"linear": 11, "conv2d": 13},
lora_alpha=17,
r=25,
use_rslora=True,
)
model = get_peft_model(model, config)
expected_scaling = {
"linear": config.alpha_pattern["linear"] / (config.r**0.5),
"embed": config.lora_alpha / (config.rank_pattern["embed"] ** 0.5),
"conv2d": config.alpha_pattern["conv2d"] / (config.rank_pattern["conv2d"] ** 0.5),
}
assert model.linear.scaling["default"] == expected_scaling["linear"]
assert model.embed.scaling["default"] == expected_scaling["embed"]
assert model.conv2d.scaling["default"] == expected_scaling["conv2d"]
def test_lora_use_dora_linear(self, data):
# check that dora is a no-op when initialized
torch.manual_seed(0)
model = self.get_model()
output_base, _, _ = model(data)
# apply DoRA to the linear layer
config = LoraConfig(target_modules=["linear"], use_dora=True)
model = get_peft_model(model, config)
with model.disable_adapter():
output_disabled, _, _ = model(data)
output_dora, _, _ = model(data)
assert torch.allclose(output_base, output_disabled)
assert torch.allclose(output_base, output_dora)
def test_lora_use_dora_linear_init_false(self, data):
# with init_lora_weights=False, dora should not be a no-op
torch.manual_seed(0)
model = self.get_model()
output_base, _, _ = model(data)
# apply DoRA, with init_lora_weights=False so the adapter is not an identity transform
config = LoraConfig(target_modules=["linear"], use_dora=True, init_lora_weights=False)
model = get_peft_model(model, config)
with model.disable_adapter():
output_disabled, _, _ = model(data)
output_dora, _, _ = model(data)
assert torch.allclose(output_base, output_disabled)
assert not torch.allclose(output_base, output_dora)
def test_lora_use_dora_with_megatron_core_raises(self):
megatron_config = {"does-not": "matter-here"}
with pytest.raises(ValueError, match="DoRA does not support megatron_core"):
LoraConfig(target_modules=["linear"], use_dora=True, megatron_config=megatron_config)
class TestAdaLoraInitialization:
torch_device = infer_device()
def test_adalora_target_modules_set(self):
config = AdaLoraConfig(target_modules=["linear", "embed", "conv2d"])
assert config.target_modules == {"linear", "embed", "conv2d"}
def test_adalora_use_dora_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support DoRA"):
AdaLoraConfig(use_dora=True)
def test_adalora_loftq_config_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support LOFTQ"):
AdaLoraConfig(loftq_config={"loftq": "config"})
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
def forward(self, x):
return self.linear(x)
return MyModule().eval().to(self.torch_device)
@pytest.fixture
def data(self):
return torch.rand(10, 1000).to(self.torch_device)
def test_adalora_default_init_identity(self, data):
# default is True
torch.manual_seed(0)
model = self.get_model()
output_before = model(data)
config = AdaLoraConfig(target_modules=["linear"])
model = get_peft_model(model, config)
output_after = model(data)
assert torch.allclose(output_before, output_after)
class TestPromptTuningInitialization:
torch_device = infer_device()
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
# choose a large weight so that averages are close to expected values
self.linear = nn.Linear(1000, 1000)
self.embed = nn.Embedding(1000, 1000)
self.conv2d = nn.Conv2d(100, 100, 3)
def forward(self, x):
x_int = (100 * x).int()
x_4d = x.flatten().reshape(1, 100, 10, 10)
return self.linear(x), self.embed(x_int), self.conv2d(x_4d)
return MyModule().eval().to(self.torch_device)
def test_use_prompt_tuning_init_text_raises(self):
with pytest.raises(ValueError, match="When prompt_tuning_init='TEXT', tokenizer_name_or_path can't be None"):
PromptTuningConfig(prompt_tuning_init="TEXT", prompt_tuning_init_text="prompt tuning init text")
with pytest.raises(ValueError, match="When prompt_tuning_init='TEXT', prompt_tuning_init_text can't be None"):
PromptTuningConfig(prompt_tuning_init="TEXT", tokenizer_name_or_path="t5-base")
class TestVeraInitialization:
torch_device = infer_device()
def get_model(self):
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 2, bias=bias)
def forward(self, X):
X = self.lin0(X)
X = self.lin1(X)
return X
return MLP().to(self.torch_device)
def test_vera_mixing_save_projection_raises(self):
# it is unclear what the right thing to do would be if some adapters save the projection weights and some don't
# so we better raise an error
config0 = VeraConfig(target_modules=["lin0"], init_weights=False, save_projection=True)
model = self.get_model()
model = get_peft_model(model, config0)
config1 = VeraConfig(target_modules=["lin0"], init_weights=False, save_projection=False)
msg = re.escape(
"VeRA projection weights must be saved for all adapters or none, but got multiple different values: "
"[False, True]"
)
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
def test_vera_add_second_adapter_with_incompatible_input_shape(self):
config0 = VeraConfig(target_modules=["lin0"], r=8)
config1 = VeraConfig(target_modules=["lin1"])
base_model = self.get_model()
lin0_in_feat = base_model.lin0.in_features
lin1_in_feat = base_model.lin1.in_features
model = get_peft_model(base_model, config0)
# not full message but enough to identify the error
msg = f"vera_A has a size of {lin0_in_feat} but {lin1_in_feat} or greater is required"
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
def test_vera_add_second_adapter_with_higher_rank(self):
rank0 = 123
rank1 = 456
config0 = VeraConfig(target_modules=["lin0"], r=rank0)
# second adapter has higher rank
config1 = VeraConfig(target_modules=["lin0"], r=rank1)
model = get_peft_model(self.get_model(), config0)
# not full message but enough to identify the error
msg = f"vera_A has a size of {rank0} but {rank1} or greater is required"
with pytest.raises(ValueError, match=msg):
model.add_adapter("other", config1)
class TestNoInfiniteRecursionDeepspeed:
# see #1892 for details
classes = [
PeftModel,
PeftMixedModel,
PeftModelForSequenceClassification,
PeftModelForQuestionAnswering,
PeftModelForTokenClassification,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PeftModelForFeatureExtraction,
]
@pytest.fixture
def wrap_init(self):
# emulates the wrapper from DeepSpeed
import functools
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
hasattr(self, "abc") # any hasattr will do
f(self, *args, **kwargs)
return wrapper
return decorator
@pytest.fixture
def model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10)
# to emulate LMs:
self.prepare_inputs_for_generation = None
self._prepare_encoder_decoder_kwargs_for_generation = None
return MyModule()
@pytest.mark.parametrize("cls", classes)
def test_no_infinite_recursion(self, cls, model, wrap_init):
original_init = cls.__init__
try:
cls.__init__ = wrap_init(cls.__init__)
# this would trigger an infinite loop before the fix in 1892
cls(model, LoraConfig(target_modules=["linear"]))
finally:
# ensure there are no side effects of this test
cls.__init__ = original_init
class TestLoadAdapterOfflineMode:
# make sure that PEFT honors offline mode
@contextmanager
def hub_offline_ctx(self):
# this is required to simulate offline mode, setting the env var dynamically inside the test does not work
# because the value is checked only once at the start of the session
with patch("huggingface_hub.constants.HF_HUB_OFFLINE", True):
reset_sessions()
yield
reset_sessions()
def test_load_from_hub_then_offline_model(self):
# this uses LoRA but it's the same mechanism for other methods
peft_model_id = "peft-internal-testing/gpt2-lora-random"
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
# first ensure that the adapter model has been downloaded
PeftModel.from_pretrained(base_model, peft_model_id)
del base_model
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
with self.hub_offline_ctx():
# does not raise
PeftModel.from_pretrained(base_model, peft_model_id)
class TestCustomModelConfigWarning:
# Check potential warnings when the user provided base_model_name_or_path is overridden by PEFT. See #2001 for
# context. We use LoRA for this test but the same applies to other methods
@pytest.fixture
def custom_module(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10)
return MyModule()
def test_no_warning_by_default_transformers_model(self, recwarn):
# first a sanity test that there is no warning by default when using a model from transformers
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
get_peft_model(model, LoraConfig())
for warning in recwarn.list:
assert "renamed" not in str(warning.message)
def test_no_warning_by_default_custom_model(self, custom_module, recwarn):
# same as above but with a custom model
get_peft_model(custom_module, LoraConfig(target_modules=["lin"]))
for warning in recwarn.list:
assert "renamed" not in str(warning.message)
def test_warning_name_transformers_model(self, recwarn):
# The base_model_name_or_path provided by the user is overridden.
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
custom_name = "custom_name"
get_peft_model(model, LoraConfig(base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'hf-internal-testing/tiny-random-OPTForCausalLM'"
assert any(msg in str(warning.message) for warning in recwarn.list)
def test_warning_name_custom_model(self, custom_module, recwarn):
custom_name = "custom_name"
get_peft_model(custom_module, LoraConfig(target_modules=["lin"], base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'None'"
assert any(msg in str(warning.message) for warning in recwarn.list)
def test_warning_name_custom_model_with_custom_name(self, custom_module, recwarn):
custom_name = "custom_name"
custom_module.name_or_path = "foobar"
get_peft_model(custom_module, LoraConfig(target_modules=["lin"], base_model_name_or_path=custom_name))
msg = f"was renamed from '{custom_name}' to 'foobar'"
assert any(msg in str(warning.message) for warning in recwarn.list)
|
peft/tests/test_initialization.py/0
|
{
"file_path": "peft/tests/test_initialization.py",
"repo_id": "peft",
"token_count": 25439
}
| 197
|
#!/bin/bash
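# Usage: ./distributed_train.sh <num_gpus> [train.py args...]
# The first argument sets the number of processes for torchrun; all remaining arguments are passed through to train.py.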
NUM_PROC=$1
shift
torchrun --nproc_per_node=$NUM_PROC train.py "$@"
|
pytorch-image-models/distributed_train.sh/0
|
{
"file_path": "pytorch-image-models/distributed_train.sh",
"repo_id": "pytorch-image-models",
"token_count": 37
}
| 198
|
# Deep Layer Aggregation
Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation: iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks.
IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation.
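Below is a minimal, illustrative sketch of an IDA-style aggregation node in PyTorch. It is not the timm implementation (the module and argument names are invented for illustration); it only shows the basic idea of upsampling a deeper feature map and fusing it with a shallower, higher-resolution one:

```py
>>> import torch
>>> from torch import nn
>>> class IDANode(nn.Module):
...     """Toy aggregation node: upsample the deeper features, then fuse with the shallower ones."""
...     def __init__(self, c_shallow, c_deep, c_out):
...         super().__init__()
...         self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
...         self.fuse = nn.Sequential(
...             nn.Conv2d(c_shallow + c_deep, c_out, kernel_size=1, bias=False),
...             nn.BatchNorm2d(c_out),
...             nn.ReLU(inplace=True),
...         )
...     def forward(self, shallow, deep):
...         deep = self.up(deep)  # match the resolution of the shallower stage
...         return self.fuse(torch.cat([shallow, deep], dim=1))
>>> node = IDANode(64, 128, 64)
>>> fused = node(torch.randn(1, 64, 56, 56), torch.randn(1, 128, 28, 28))
>>> fused.shape
torch.Size([1, 64, 56, 56])
```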
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('dla102', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `dla102`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
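For example, a brief sketch using timm's `features_only` interface (the number and shapes of the returned feature maps depend on the model):

```py
>>> import torch
>>> import timm
>>> feature_model = timm.create_model('dla102', pretrained=True, features_only=True)
>>> feature_model.eval()
>>> with torch.no_grad():
...     features = feature_model(torch.randn(1, 3, 224, 224))
>>> for f in features:
...     print(f.shape)  # one feature map per stage, at decreasing spatial resolution
```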
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
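For example, a rough sketch of such a loop (this is not timm's training script; the tensors below are stand-ins for a `DataLoader` over your own labelled images):

```py
>>> import torch
>>> import timm
>>> from torch.utils.data import DataLoader, TensorDataset
>>> model = timm.create_model('dla102', pretrained=True, num_classes=10)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> # stand-in data; replace with a DataLoader over your own dataset
>>> loader = DataLoader(TensorDataset(torch.randn(8, 3, 224, 224), torch.randint(0, 10, (8,))), batch_size=4)
>>> model.train()
>>> for images, targets in loader:
...     optimizer.zero_grad()
...     loss = criterion(model(images), targets)
...     loss.backward()
...     optimizer.step()
```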
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{yu2019deep,
title={Deep Layer Aggregation},
author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell},
year={2019},
eprint={1707.06484},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: DLA
Paper:
Title: Deep Layer Aggregation
URL: https://paperswithcode.com/paper/deep-layer-aggregation
Models:
- Name: dla102
In Collection: DLA
Metadata:
FLOPs: 7192952808
Parameters: 33270000
File Size: 135290579
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L410
Weights: http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.03%
Top 5 Accuracy: 93.95%
- Name: dla102x
In Collection: DLA
Metadata:
FLOPs: 5886821352
Parameters: 26310000
File Size: 107552695
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L418
Weights: http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.51%
Top 5 Accuracy: 94.23%
- Name: dla102x2
In Collection: DLA
Metadata:
FLOPs: 9343847400
Parameters: 41280000
File Size: 167645295
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla102x2
LR: 0.1
Epochs: 120
Layers: 102
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L426
Weights: http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.44%
Top 5 Accuracy: 94.65%
- Name: dla169
In Collection: DLA
Metadata:
FLOPs: 11598004200
Parameters: 53390000
File Size: 216547113
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: dla169
LR: 0.1
Epochs: 120
Layers: 169
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L434
Weights: http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.69%
Top 5 Accuracy: 94.33%
- Name: dla34
In Collection: DLA
Metadata:
FLOPs: 3070105576
Parameters: 15740000
File Size: 63228658
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla34
LR: 0.1
Epochs: 120
Layers: 34
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L362
Weights: http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.62%
Top 5 Accuracy: 92.06%
- Name: dla46_c
In Collection: DLA
Metadata:
FLOPs: 583277288
Parameters: 1300000
File Size: 5307963
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L369
Weights: http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 64.87%
Top 5 Accuracy: 86.29%
- Name: dla46x_c
In Collection: DLA
Metadata:
FLOPs: 544052200
Parameters: 1070000
File Size: 4387641
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla46x_c
LR: 0.1
Epochs: 120
Layers: 46
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L378
Weights: http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.98%
Top 5 Accuracy: 86.99%
- Name: dla60
In Collection: DLA
Metadata:
FLOPs: 4256251880
Parameters: 22040000
File Size: 89560235
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60
LR: 0.1
Epochs: 120
Layers: 60
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L394
Weights: http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.04%
Top 5 Accuracy: 93.32%
- Name: dla60_res2net
In Collection: DLA
Metadata:
FLOPs: 4147578504
Parameters: 20850000
File Size: 84886593
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2net
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L346
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.46%
Top 5 Accuracy: 94.21%
- Name: dla60_res2next
In Collection: DLA
Metadata:
FLOPs: 3485335272
Parameters: 17030000
File Size: 69639245
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60_res2next
Layers: 60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.44%
Top 5 Accuracy: 94.16%
- Name: dla60x
In Collection: DLA
Metadata:
FLOPs: 3544204264
Parameters: 17350000
File Size: 70883139
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L402
Weights: http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.25%
Top 5 Accuracy: 94.02%
- Name: dla60x_c
In Collection: DLA
Metadata:
FLOPs: 593325032
Parameters: 1320000
File Size: 5454396
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- DLA Bottleneck Residual Block
- DLA Residual Block
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: dla60x_c
LR: 0.1
Epochs: 120
Layers: 60
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/dla.py#L386
Weights: http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.91%
Top 5 Accuracy: 88.42%
-->
|
pytorch-image-models/hfdocs/source/models/dla.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/dla.mdx",
"repo_id": "pytorch-image-models",
"token_count": 6758
}
| 199
|
# Inception ResNet v2
**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
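Conceptually, the parallel convolution branches in an Inception-ResNet block are still concatenated, but a 1x1 convolution then projects the result back to the input width so it can be *added* to the block input instead of being forwarded as a widened concatenation. The snippet below is a minimal, hypothetical sketch of that idea; the layer widths are illustrative and not the actual Inception-ResNet-v2 configuration.
```py
>>> import torch
>>> import torch.nn as nn
>>> channels = 64
>>> branch1 = nn.Conv2d(channels, 32, kernel_size=1)                                       # 1x1 branch
>>> branch3 = nn.Sequential(nn.Conv2d(channels, 32, 1), nn.Conv2d(32, 32, 3, padding=1))   # 1x1 -> 3x3 branch
>>> project = nn.Conv2d(64, channels, kernel_size=1)                                       # project concat back to input width
>>> x = torch.randn(1, channels, 56, 56)
>>> mixed = torch.cat([branch1(x), branch3(x)], dim=1)                                     # Inception-style branch concatenation
>>> out = torch.relu(x + project(mixed))                                                   # residual add replaces the concat output
>>> print(out.shape)
>>> # prints: torch.Size([1, 64, 56, 56])
```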
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('inception_resnet_v2', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predicted class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception ResNet v2
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_resnet_v2
In Collection: Inception ResNet v2
Metadata:
FLOPs: 16959133120
Parameters: 55850000
File Size: 223774238
Architecture:
- Average Pooling
- Dropout
- Inception-ResNet-v2 Reduction-B
- Inception-ResNet-v2-A
- Inception-ResNet-v2-B
- Inception-ResNet-v2-C
- Reduction-A
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_resnet_v2
LR: 0.045
Dropout: 0.2
Crop Pct: '0.897'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_resnet_v2.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
      Top 1 Accuracy: 80.46%
      Top 5 Accuracy: 95.31%
-->
|
pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/inception-resnet-v2.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1682
}
| 200
|
# Res2NeXt
**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.
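As a rough illustration of that building block (the widths and number of scales below are made up for the example, and the real blocks also wrap this core in ResNeXt-style grouped convolutions and 1x1 projections), the channels are split into groups, and each group's 3x3 convolution also receives the previous group's output, so the effective receptive field grows hierarchically within a single block:
```py
>>> import torch
>>> import torch.nn as nn
>>> scales, width = 4, 16                        # number of scale groups, channels per group (illustrative)
>>> convs = nn.ModuleList([nn.Conv2d(width, width, 3, padding=1) for _ in range(scales - 1)])
>>> x = torch.randn(1, scales * width, 56, 56)
>>> splits = torch.split(x, width, dim=1)        # split channels into `scales` groups
>>> outs, prev = [splits[0]], 0                  # first group passes through untouched
>>> for i, conv in enumerate(convs, start=1):
...     prev = conv(splits[i] + prev)            # hierarchical residual-like connection within one block
...     outs.append(prev)
>>> y = torch.cat(outs, dim=1)                   # re-concatenate; later groups see larger receptive fields
>>> print(y.shape)
>>> # prints: torch.Size([1, 64, 56, 56])
```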
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('res2next50', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predicted class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `res2next50`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('res2next50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{Gao_2021,
title={Res2Net: A New Multi-Scale Backbone Architecture},
volume={43},
ISSN={1939-3539},
url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
DOI={10.1109/tpami.2019.2938758},
number={2},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
publisher={Institute of Electrical and Electronics Engineers (IEEE)},
author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
year={2021},
month={Feb},
pages={652–662}
}
```
<!--
Type: model-index
Collections:
- Name: Res2NeXt
Paper:
Title: 'Res2Net: A New Multi-scale Backbone Architecture'
URL: https://paperswithcode.com/paper/res2net-a-new-multi-scale-backbone
Models:
- Name: res2next50
In Collection: Res2NeXt
Metadata:
FLOPs: 5396798208
Parameters: 24670000
File Size: 99019592
Architecture:
- Batch Normalization
- Convolution
- Global Average Pooling
- ReLU
- Res2NeXt Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x Titan Xp GPUs
ID: res2next50
LR: 0.1
Epochs: 100
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/res2net.py#L207
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.24%
Top 5 Accuracy: 93.91%
-->
|
pytorch-image-models/hfdocs/source/models/res2next.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/res2next.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1711
}
| 201
|
# (Tensorflow) EfficientNet Lite
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2).
EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation).
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
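As a quick worked example of the compound scaling arithmetic (using the \\( \alpha \approx 1.2, \beta \approx 1.1, \gamma \approx 1.15 \\) coefficients reported in the EfficientNet paper; the choice of \\( \phi \\) and the rounding to concrete layer/channel counts are purely illustrative):
```py
>>> alpha, beta, gamma = 1.2, 1.1, 1.15   # approximate coefficients from the paper's grid search
>>> phi = 2                               # compound coefficient, chosen arbitrarily for this example
>>> depth_mult = alpha ** phi             # multiply the number of layers by this factor
>>> width_mult = beta ** phi              # multiply the channel widths by this factor
>>> res_mult = gamma ** phi               # multiply the input resolution by this factor
>>> print(round(depth_mult, 2), round(width_mult, 2), round(res_mult, 2))
>>> # prints: 1.44 1.21 1.32
>>> print(round(alpha * beta ** 2 * gamma ** 2, 2))
>>> # prints: 1.92 (close to 2, so each +1 of phi roughly doubles FLOPs)
```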
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predicted class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: TF EfficientNet Lite
Paper:
Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for
Models:
- Name: tf_efficientnet_lite0
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 488052032
Parameters: 4650000
File Size: 18820223
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite0
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1596
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.83%
Top 5 Accuracy: 92.17%
- Name: tf_efficientnet_lite1
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 773639520
Parameters: 5420000
File Size: 21939331
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite1
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1607
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.67%
Top 5 Accuracy: 93.24%
- Name: tf_efficientnet_lite2
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 1068494432
Parameters: 6090000
File Size: 24658687
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite2
Crop Pct: '0.89'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1618
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.48%
Top 5 Accuracy: 93.75%
- Name: tf_efficientnet_lite3
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 2011534304
Parameters: 8199999
File Size: 33161413
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite3
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1629
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.83%
Top 5 Accuracy: 94.91%
- Name: tf_efficientnet_lite4
In Collection: TF EfficientNet Lite
Metadata:
FLOPs: 5164802912
Parameters: 13010000
File Size: 52558819
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- RELU6
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: tf_efficientnet_lite4
Crop Pct: '0.92'
Image Size: '380'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1640
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.54%
Top 5 Accuracy: 95.66%
-->
|
pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/tf-efficientnet-lite.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3373
}
| 202
|
#!/usr/bin/env python3
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import json
import logging
import os
import time
from contextlib import suppress
from functools import partial
import numpy as np
import pandas as pd
import torch
from timm.data import create_dataset, create_loader, resolve_data_config, ImageNetInfo, infer_imagenet_subset
from timm.layers import apply_test_time_pool
from timm.models import create_model
from timm.utils import AverageMeter, setup_default_logging, set_jit_fuser, ParseKwargs
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_FMT_EXT = {
'json': '.json',
'json-record': '.json',
'json-split': '.json',
'parquet': '.parquet',
'csv': '.csv',
}
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='MODEL', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-dir', type=str, default=None,
help='folder for output results')
parser.add_argument('--results-file', type=str, default=None,
help='results filename (relative to results-dir)')
parser.add_argument('--results-format', type=str, nargs='+', default=['csv'],
                    help='results format (one of "csv", "json", "json-record", "json-split", "parquet")')
parser.add_argument('--results-separate-col', action='store_true', default=False,
help='separate output columns per result index.')
parser.add_argument('--topk', default=1, type=int,
metavar='N', help='Top-k to output to CSV')
parser.add_argument('--fullname', action='store_true', default=False,
help='use full sample name in output (not just basename).')
parser.add_argument('--filename-col', type=str, default='filename',
help='name for filename / sample name column')
parser.add_argument('--index-col', type=str, default='index',
help='name for output indices column(s)')
parser.add_argument('--label-col', type=str, default='label',
                    help='name for output label column(s)')
parser.add_argument('--output-col', type=str, default=None,
help='name for logit/probs output column(s)')
parser.add_argument('--output-type', type=str, default='prob',
                    help='output type column ("prob" for probabilities, "logit" for raw logits)')
parser.add_argument('--label-type', type=str, default='description',
                    help='type of label to output, one of "none", "name", "description", "detail"')
parser.add_argument('--include-index', action='store_true', default=False,
help='include the class index in results')
parser.add_argument('--exclude-output', action='store_true', default=False,
help='exclude logits/probs from results, just indices. topk must be set !=0.')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
# resolve AMP arguments based on PyTorch / Apex availability
amp_autocast = suppress
if args.amp:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
assert args.amp_dtype in ('float16', 'bfloat16')
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Running inference in mixed precision with native PyTorch AMP.')
else:
_logger.info('Running inference in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=in_chans,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
_logger.info(
f'Model {args.model} created, param count: {sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device)
model.eval()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
root_dir = args.data or args.data_dir
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
class_map=args.class_map,
)
if test_time_pool:
data_config['crop_pct'] = 1.0
workers = 1 if 'tfds' in args.dataset or 'wds' in args.dataset else args.workers
loader = create_loader(
dataset,
batch_size=args.batch_size,
use_prefetcher=True,
num_workers=workers,
device=device,
**data_config,
)
to_label = None
if args.label_type in ('name', 'description', 'detail'):
imagenet_subset = infer_imagenet_subset(model)
if imagenet_subset is not None:
dataset_info = ImageNetInfo(imagenet_subset)
if args.label_type == 'name':
to_label = lambda x: dataset_info.index_to_label_name(x)
elif args.label_type == 'detail':
to_label = lambda x: dataset_info.index_to_description(x, detailed=True)
else:
to_label = lambda x: dataset_info.index_to_description(x)
to_label = np.vectorize(to_label)
else:
_logger.error("Cannot deduce ImageNet subset from model, no labelling will be performed.")
top_k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
all_indices = []
all_labels = []
all_outputs = []
use_probs = args.output_type == 'prob'
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
with amp_autocast():
output = model(input)
if use_probs:
output = output.softmax(-1)
if top_k:
output, indices = output.topk(top_k)
np_indices = indices.cpu().numpy()
if args.include_index:
all_indices.append(np_indices)
if to_label is not None:
np_labels = to_label(np_indices)
all_labels.append(np_labels)
all_outputs.append(output.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
all_indices = np.concatenate(all_indices, axis=0) if all_indices else None
all_labels = np.concatenate(all_labels, axis=0) if all_labels else None
all_outputs = np.concatenate(all_outputs, axis=0).astype(np.float32)
filenames = loader.dataset.filenames(basename=not args.fullname)
output_col = args.output_col or ('prob' if use_probs else 'logit')
data_dict = {args.filename_col: filenames}
if args.results_separate_col and all_outputs.shape[-1] > 1:
if all_indices is not None:
for i in range(all_indices.shape[-1]):
data_dict[f'{args.index_col}_{i}'] = all_indices[:, i]
if all_labels is not None:
for i in range(all_labels.shape[-1]):
data_dict[f'{args.label_col}_{i}'] = all_labels[:, i]
for i in range(all_outputs.shape[-1]):
data_dict[f'{output_col}_{i}'] = all_outputs[:, i]
else:
if all_indices is not None:
if all_indices.shape[-1] == 1:
all_indices = all_indices.squeeze(-1)
data_dict[args.index_col] = list(all_indices)
if all_labels is not None:
if all_labels.shape[-1] == 1:
all_labels = all_labels.squeeze(-1)
data_dict[args.label_col] = list(all_labels)
if all_outputs.shape[-1] == 1:
all_outputs = all_outputs.squeeze(-1)
data_dict[output_col] = list(all_outputs)
df = pd.DataFrame(data=data_dict)
results_filename = args.results_file
if results_filename:
filename_no_ext, ext = os.path.splitext(results_filename)
if ext and ext in _FMT_EXT.values():
# if filename provided with one of expected ext,
# remove it as it will be added back
results_filename = filename_no_ext
else:
# base default filename on model name + img-size
img_size = data_config["input_size"][1]
results_filename = f'{args.model}-{img_size}'
if args.results_dir:
results_filename = os.path.join(args.results_dir, results_filename)
for fmt in args.results_format:
save_results(df, results_filename, fmt)
    print('--result')
print(df.set_index(args.filename_col).to_json(orient='index', indent=4))
def save_results(df, results_filename, results_format='csv', filename_col='filename'):
results_filename += _FMT_EXT[results_format]
if results_format == 'parquet':
df.set_index(filename_col).to_parquet(results_filename)
elif results_format == 'json':
df.set_index(filename_col).to_json(results_filename, indent=4, orient='index')
    elif results_format == 'json-record':
df.to_json(results_filename, lines=True, orient='records')
elif results_format == 'json-split':
df.to_json(results_filename, indent=4, orient='split', index=False)
else:
df.to_csv(results_filename, index=False)
if __name__ == '__main__':
main()
|
pytorch-image-models/inference.py/0
|
{
"file_path": "pytorch-image-models/inference.py",
"repo_id": "pytorch-image-models",
"token_count": 6815
}
| 203
|
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class RepeatAugSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a given sample is visible to a
    different process (GPU). Heavily based on torch.utils.data.DistributedSampler
This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py
Used in
Copyright (c) 2015-present, Facebook, Inc.
"""
def __init__(
self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
num_repeats=3,
selected_round=256,
selected_ratio=0,
):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.shuffle = shuffle
self.num_repeats = num_repeats
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# Determine the number of samples to select per epoch for each rank.
# num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked
# via selected_ratio and selected_round args.
selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0
if selected_round:
self.num_selected_samples = int(math.floor(
len(self.dataset) // selected_round * selected_round / selected_ratio))
else:
self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio))
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(start=0, end=len(self.dataset))
# produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
if isinstance(self.num_repeats, float) and not self.num_repeats.is_integer():
# resample for repeats w/ non-integer ratio
repeat_size = math.ceil(self.num_repeats * len(self.dataset))
indices = indices[torch.tensor([int(i // self.num_repeats) for i in range(repeat_size)])]
else:
indices = torch.repeat_interleave(indices, repeats=int(self.num_repeats), dim=0)
indices = indices.tolist() # leaving as tensor thrashes dataloader memory
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size > 0:
indices += indices[:padding_size]
assert len(indices) == self.total_size
# subsample per rank
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# return up to num selected samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
|
pytorch-image-models/timm/data/distributed_sampler.py/0
|
{
"file_path": "pytorch-image-models/timm/data/distributed_sampler.py",
"repo_id": "pytorch-image-models",
"token_count": 2276
}
| 204
|
""" Dataset reader for webdataset
Hacked together by / Copyright 2022 Ross Wightman
"""
import io
import json
import logging
import math
import os
import random
import sys
from dataclasses import dataclass
from functools import partial
from itertools import islice
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
import torch.distributed as dist
import yaml
from PIL import Image
from torch.utils.data import Dataset, IterableDataset, get_worker_info
try:
import webdataset as wds
from webdataset.filters import _shuffle, getfirst
from webdataset.shardlists import expand_urls
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
except ImportError:
wds = None
expand_urls = None
from .class_map import load_class_map
from .reader import Reader
from .shared_count import SharedCount
_logger = logging.getLogger(__name__)
SAMPLE_SHUFFLE_SIZE = int(os.environ.get('WDS_SHUFFLE_SIZE', 8192))
SAMPLE_INITIAL_SIZE = int(os.environ.get('WDS_INITIAL_SIZE', 2048))
def _load_info(root, names=('_info.json', 'info.json')):
if isinstance(names, str):
names = (names,)
tried = []
err_str = ''
for n in names:
full_path = os.path.join(root, n)
try:
tried.append(full_path)
with wds.gopen(full_path) as f:
if n.endswith('.json'):
info_dict = json.load(f)
else:
info_dict = yaml.safe_load(f)
return info_dict
except Exception as e:
err_str = str(e)
_logger.warning(
f'Dataset info file not found at {tried}. Error: {err_str}. '
'Falling back to provided split and size arg.')
return {}
@dataclass
class SplitInfo:
num_samples: int
filenames: Tuple[str]
shard_lengths: Tuple[int] = ()
alt_label: str = ''
name: str = ''
def _parse_split_info(split: str, info: Dict):
def _info_convert(dict_info):
return SplitInfo(
num_samples=dict_info['num_samples'],
filenames=tuple(dict_info['filenames']),
shard_lengths=tuple(dict_info['shard_lengths']),
alt_label=dict_info.get('alt_label', ''),
name=dict_info['name'],
)
if 'tar' in split or '..' in split:
# split in WDS string braceexpand format, sample count can be included with a | separator
        # ex: `dataset-split-{0000..9999}.tar|100000` for 10000 shards, covering 100,000 samples
split = split.split('|')
num_samples = 0
split_name = ''
if len(split) > 1:
num_samples = int(split[1])
split = split[0]
if '::' not in split:
split_parts = split.split('-', 3)
split_idx = len(split_parts) - 1
if split_idx and 'splits' in info and split_parts[split_idx] in info['splits']:
split_name = split_parts[split_idx]
split_filenames = expand_urls(split)
if split_name:
split_info = info['splits'][split_name]
if not num_samples:
_fc = {f: c for f, c in zip(split_info['filenames'], split_info['shard_lengths'])}
num_samples = sum(_fc[f] for f in split_filenames)
split_info['filenames'] = tuple(_fc.keys())
split_info['shard_lengths'] = tuple(_fc.values())
split_info['num_samples'] = num_samples
split_info = _info_convert(split_info)
else:
split_info = SplitInfo(
name=split_name,
num_samples=num_samples,
filenames=split_filenames,
)
else:
if 'splits' not in info or split not in info['splits']:
raise RuntimeError(f"split {split} not found in info ({info.get('splits', {}).keys()})")
split = split
split_info = info['splits'][split]
split_info = _info_convert(split_info)
return split_info
def log_and_continue(exn):
"""Call in an exception handler to ignore exceptions, isssue a warning, and continue."""
_logger.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.')
# NOTE: try force an exit on errors that are clearly code / config and not transient
if isinstance(exn, TypeError):
raise exn
return True
def _decode(
sample,
image_key='jpg',
image_mode='RGB',
target_key='cls',
alt_label=''
):
""" Custom sample decode
* decode and convert PIL Image
* cls byte string label to int
* pass through JSON byte string (if it exists) without parse
"""
# decode class label, skip if alternate label not valid
if alt_label:
# alternative labels are encoded in json metadata
meta = json.loads(sample['json'])
class_label = int(meta[alt_label])
if class_label < 0:
# skipped labels currently encoded as -1, may change to a null/None value
return None
else:
class_label = int(sample[target_key])
# decode image
img = getfirst(sample, image_key)
with io.BytesIO(img) as b:
img = Image.open(b)
img.load()
if image_mode:
img = img.convert(image_mode)
# json passed through in undecoded state
decoded = dict(jpg=img, cls=class_label, json=sample.get('json', None))
return decoded
def pytorch_worker_seed():
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour the seed already created for pytorch dataloader workers if it exists
return worker_info.seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed()
if wds is not None:
# conditional to avoid mandatory wds import (via inheritance of wds.PipelineStage)
class detshuffle2(wds.PipelineStage):
def __init__(
self,
bufsize=1000,
initial=100,
seed=0,
epoch=-1,
):
self.bufsize = bufsize
self.initial = initial
self.seed = seed
self.epoch = epoch
def run(self, src):
if isinstance(self.epoch, SharedCount):
epoch = self.epoch.value
else:
                # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.seed < 0:
seed = pytorch_worker_seed() + epoch
else:
seed = self.seed + epoch
# _logger.info(f'shuffle seed: {self.seed}, {seed}, epoch: {epoch}') # FIXME temporary
rng = random.Random(seed)
return _shuffle(src, self.bufsize, self.initial, rng)
else:
detshuffle2 = None
class ResampledShards2(IterableDataset):
"""An iterable dataset yielding a list of urls."""
def __init__(
self,
urls,
nshards=sys.maxsize,
worker_seed=None,
deterministic=True,
epoch=-1,
):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
"""
super().__init__()
urls = wds.shardlists.expand_urls(urls)
self.urls = urls
assert isinstance(self.urls[0], str)
self.nshards = nshards
self.rng = random.Random()
self.worker_seed = pytorch_worker_seed if worker_seed is None else worker_seed
self.deterministic = deterministic
self.epoch = epoch
def __iter__(self):
"""Return an iterator over the shards."""
if isinstance(self.epoch, SharedCount):
epoch = self.epoch.value
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.deterministic:
# reset seed w/ epoch if deterministic, worker seed should be deterministic due to arg.seed
self.rng = random.Random(self.worker_seed() + epoch)
for _ in range(self.nshards):
index = self.rng.randint(0, len(self.urls) - 1)
yield dict(url=self.urls[index])
class ReaderWds(Reader):
def __init__(
self,
root: str,
name: Optional[str] = None,
split: str = 'train',
is_training: bool = False,
num_samples: Optional[int] = None,
batch_size: int = 1,
repeats: int = 0,
seed: int = 42,
class_map: Optional[dict] = None,
input_key: str = 'jpg;png;webp',
input_img_mode: str = 'RGB',
target_key: str = 'cls',
target_img_mode: str = '',
filename_key: str = 'filename',
sample_shuffle_size: Optional[int] = None,
            sample_initial_size: Optional[int] = None,
):
super().__init__()
if wds is None:
raise RuntimeError(
'Please install webdataset 0.2.x package `pip install git+https://github.com/webdataset/webdataset`.')
self.root = root
self.is_training = is_training
self.batch_size = batch_size
self.repeats = repeats
self.common_seed = seed # a seed that's fixed across all worker / distributed instances
self.shard_shuffle_size = 500
self.sample_shuffle_size = sample_shuffle_size or SAMPLE_SHUFFLE_SIZE
        self.sample_initial_size = sample_initial_size or SAMPLE_INITIAL_SIZE
self.input_key = input_key
self.input_img_mode = input_img_mode
self.target_key = target_key
self.filename_key = filename_key
self.key_ext = '.JPEG' # extension to add to key for original filenames (DS specific, default ImageNet)
self.info = _load_info(self.root)
self.split_info = _parse_split_info(split, self.info)
if num_samples is not None:
self.num_samples = num_samples
else:
self.num_samples = self.split_info.num_samples
if not self.num_samples:
raise RuntimeError(f'Invalid split definition, num_samples not specified.')
self.remap_class = False
if class_map:
self.class_to_idx = load_class_map(class_map)
self.remap_class = True
else:
self.class_to_idx = {}
# Distributed world state
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
# Attributes that are updated in _lazy_init
self.worker_info = None
self.worker_id = 0
self.worker_seed = seed # seed unique to each worker instance
self.num_workers = 1
self.global_worker_id = 0
self.global_num_workers = 1
self.init_count = 0
self.epoch_count = SharedCount()
# DataPipeline is lazy init, the majority of WDS DataPipeline could be init here, BUT, shuffle seed
# is not handled in manner where it can be deterministic for each worker AND initialized up front
self.ds = None
def set_epoch(self, count):
self.epoch_count.value = count
def set_loader_cfg(
self,
num_workers: Optional[int] = None,
):
if self.ds is not None:
return
if num_workers is not None:
self.num_workers = num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
def _lazy_init(self):
""" Lazily initialize worker (in worker processes)
"""
if self.worker_info is None:
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
self.worker_info = worker_info
self.worker_id = worker_info.id
self.worker_seed = worker_info.seed
self.num_workers = worker_info.num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id
# init data pipeline
abs_shard_filenames = [os.path.join(self.root, f) for f in self.split_info.filenames]
pipeline = [wds.SimpleShardList(abs_shard_filenames)]
# at this point we have an iterator over all the shards
if self.is_training:
pipeline.extend([
detshuffle2(
self.shard_shuffle_size,
seed=self.common_seed,
epoch=self.epoch_count,
),
self._split_by_node_and_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
wds.shuffle(
bufsize=self.sample_shuffle_size,
initial=self.sample_initial_size,
rng=random.Random(self.worker_seed) # this is why we lazy-init whole DataPipeline
),
])
else:
pipeline.extend([
self._split_by_node_and_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
])
pipeline.extend([
wds.map(
partial(
_decode,
image_key=self.input_key,
image_mode=self.input_img_mode,
alt_label=self.split_info.alt_label,
),
handler=log_and_continue,
),
wds.rename(image=self.input_key, target=self.target_key)
])
self.ds = wds.DataPipeline(*pipeline)
def _split_by_node_and_worker(self, src):
if self.global_num_workers > 1:
for s in islice(src, self.global_worker_id, None, self.global_num_workers):
yield s
else:
for s in src:
yield s
def _num_samples_per_worker(self):
num_worker_samples = self.num_samples / max(self.global_num_workers, self.dist_num_replicas)
if self.is_training or self.dist_num_replicas > 1:
num_worker_samples = math.ceil(num_worker_samples)
if self.is_training:
num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size
return int(num_worker_samples)
def __iter__(self):
if self.ds is None:
self._lazy_init()
num_worker_samples = self._num_samples_per_worker()
if self.is_training or self.dist_num_replicas > 1:
# NOTE: doing distributed validation w/ WDS is messy, hard to meet constraints that
# same # of batches needed across all replicas w/ seeing each sample once.
# with_epoch() is simple but could miss a shard's worth of samples in some workers,
# and duplicate in others. Best to keep num DL workers low and a divisor of #val shards.
ds = self.ds.with_epoch(num_worker_samples)
else:
ds = self.ds
i = 0
# _logger.info(f'start {i}, {self.worker_id}') # FIXME temporary debug
for sample in ds:
target = sample['target']
if self.remap_class:
target = self.class_to_idx[target]
yield sample['image'], target
i += 1
# _logger.info(f'end {i}, {self.worker_id}') # FIXME temporary debug
def __len__(self):
num_samples = self._num_samples_per_worker() * self.num_workers
return num_samples
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to examples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if self.filename_key in sample:
name = sample[self.filename_key]
elif '__key__' in sample:
name = sample['__key__'] + self.key_ext
else:
assert False, "No supported name field present"
names.append(name)
if len(names) >= self.num_samples:
break # safety for ds.repeat() case
return names
|
pytorch-image-models/timm/data/readers/reader_wds.py/0
|
{
"file_path": "pytorch-image-models/timm/data/readers/reader_wds.py",
"repo_id": "pytorch-image-models",
"token_count": 7878
}
| 205
|
""" Classifier head and layer factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict
from functools import partial
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from .adaptive_avgmax_pool import SelectAdaptivePool2d
from .create_act import get_act_layer
from .create_norm import get_norm_layer
def _create_pool(
num_features: int,
num_classes: int,
pool_type: str = 'avg',
use_conv: bool = False,
input_fmt: Optional[str] = None,
):
flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
global_pool = SelectAdaptivePool2d(
pool_type=pool_type,
flatten=flatten_in_pool,
input_fmt=input_fmt,
)
num_pooled_features = num_features * global_pool.feat_mult()
return global_pool, num_pooled_features
def _create_fc(num_features, num_classes, use_conv=False):
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
fc = nn.Linear(num_features, num_classes, bias=True)
return fc
def create_classifier(
num_features: int,
num_classes: int,
pool_type: str = 'avg',
use_conv: bool = False,
input_fmt: str = 'NCHW',
drop_rate: Optional[float] = None,
):
global_pool, num_pooled_features = _create_pool(
num_features,
num_classes,
pool_type,
use_conv=use_conv,
input_fmt=input_fmt,
)
fc = _create_fc(
num_pooled_features,
num_classes,
use_conv=use_conv,
)
if drop_rate is not None:
dropout = nn.Dropout(drop_rate)
return global_pool, dropout, fc
return global_pool, fc
class ClassifierHead(nn.Module):
"""Classifier head w/ configurable global pooling and dropout."""
def __init__(
self,
in_features: int,
num_classes: int,
pool_type: str = 'avg',
drop_rate: float = 0.,
use_conv: bool = False,
input_fmt: str = 'NCHW',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
"""
super(ClassifierHead, self).__init__()
self.in_features = in_features
self.use_conv = use_conv
self.input_fmt = input_fmt
global_pool, fc = create_classifier(
in_features,
num_classes,
pool_type,
use_conv=use_conv,
input_fmt=input_fmt,
)
self.global_pool = global_pool
self.drop = nn.Dropout(drop_rate)
self.fc = fc
self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None and pool_type != self.global_pool.pool_type:
self.global_pool, self.fc = create_classifier(
self.in_features,
num_classes,
pool_type=pool_type,
use_conv=self.use_conv,
input_fmt=self.input_fmt,
)
self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity()
else:
num_pooled_features = self.in_features * self.global_pool.feat_mult()
self.fc = _create_fc(
num_pooled_features,
num_classes,
use_conv=self.use_conv,
)
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.drop(x)
if pre_logits:
return self.flatten(x)
x = self.fc(x)
return self.flatten(x)
class NormMlpClassifierHead(nn.Module):
""" A Pool -> Norm -> Mlp Classifier Head for '2D' NCHW tensors
"""
def __init__(
self,
in_features: int,
num_classes: int,
hidden_size: Optional[int] = None,
pool_type: str = 'avg',
drop_rate: float = 0.,
norm_layer: Union[str, Callable] = 'layernorm2d',
act_layer: Union[str, Callable] = 'tanh',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
norm_layer: Normalization layer type.
act_layer: MLP activation layer type (only used if hidden_size is not None).
"""
super().__init__()
self.in_features = in_features
self.hidden_size = hidden_size
self.num_features = in_features
self.use_conv = not pool_type
norm_layer = get_norm_layer(norm_layer)
act_layer = get_act_layer(act_layer)
linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
self.norm = norm_layer(in_features)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
if hidden_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', linear_layer(in_features, hidden_size)),
('act', act_layer()),
]))
self.num_features = hidden_size
else:
self.pre_logits = nn.Identity()
self.drop = nn.Dropout(drop_rate)
self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None:
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
self.use_conv = self.global_pool.is_identity()
linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
if self.hidden_size:
if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or
(isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)):
with torch.no_grad():
new_fc = linear_layer(self.in_features, self.hidden_size)
new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape))
new_fc.bias.copy_(self.pre_logits.fc.bias)
self.pre_logits.fc = new_fc
self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.norm(x)
x = self.flatten(x)
x = self.pre_logits(x)
x = self.drop(x)
if pre_logits:
return x
x = self.fc(x)
return x
class ClNormMlpClassifierHead(nn.Module):
""" A Pool -> Norm -> Mlp Classifier Head for n-D NxxC tensors
"""
def __init__(
self,
in_features: int,
num_classes: int,
hidden_size: Optional[int] = None,
pool_type: str = 'avg',
drop_rate: float = 0.,
norm_layer: Union[str, Callable] = 'layernorm',
act_layer: Union[str, Callable] = 'gelu',
input_fmt: str = 'NHWC',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
norm_layer: Normalization layer type.
act_layer: MLP activation layer type (only used if hidden_size is not None).
"""
super().__init__()
self.in_features = in_features
self.hidden_size = hidden_size
self.num_features = in_features
assert pool_type in ('', 'avg', 'max', 'avgmax')
self.pool_type = pool_type
assert input_fmt in ('NHWC', 'NLC')
self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2)
norm_layer = get_norm_layer(norm_layer)
act_layer = get_act_layer(act_layer)
self.norm = norm_layer(in_features)
if hidden_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(in_features, hidden_size)),
('act', act_layer()),
]))
self.num_features = hidden_size
else:
self.pre_logits = nn.Identity()
self.drop = nn.Dropout(drop_rate)
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
if pool_type is not None:
self.pool_type = pool_type
if reset_other:
self.pre_logits = nn.Identity()
self.norm = nn.Identity()
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def _global_pool(self, x):
if self.pool_type:
if self.pool_type == 'avg':
x = x.mean(dim=self.pool_dim)
elif self.pool_type == 'max':
x = x.amax(dim=self.pool_dim)
elif self.pool_type == 'avgmax':
x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim))
return x
def forward(self, x, pre_logits: bool = False):
x = self._global_pool(x)
x = self.norm(x)
x = self.pre_logits(x)
x = self.drop(x)
if pre_logits:
return x
x = self.fc(x)
return x
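# Illustrative usage sketch (appended for clarity, not part of the original module): ClassifierHead
# applied to an assumed NCHW backbone feature map. The shapes below are example values only.
if __name__ == '__main__':
    head = ClassifierHead(in_features=512, num_classes=1000, pool_type='avg', drop_rate=0.1)
    feats = torch.randn(2, 512, 7, 7)        # assumed backbone output: B, C, H, W
    logits = head(feats)                     # global pool -> dropout -> fc
    pooled = head(feats, pre_logits=True)    # pooled features only, classifier skipped
    print(logits.shape, pooled.shape)        # torch.Size([2, 1000]) torch.Size([2, 512])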
|
pytorch-image-models/timm/layers/classifier.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/classifier.py",
"repo_id": "pytorch-image-models",
"token_count": 5047
}
| 206
|
""" Gather-Excite Attention Block
Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348
Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet
I've tried to support all of the extent settings, both w/ and w/o extra params. I don't believe
I've seen another impl that covers all of the cases.
NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
from torch import nn as nn
import torch.nn.functional as F
from .create_act import create_act_layer, get_act_layer
from .create_conv2d import create_conv2d
from .helpers import make_divisible
from .mlp import ConvMlp
class GatherExcite(nn.Module):
""" Gather-Excite Attention Module
"""
def __init__(
self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True,
rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'):
super(GatherExcite, self).__init__()
self.add_maxpool = add_maxpool
act_layer = get_act_layer(act_layer)
self.extent = extent
if extra_params:
self.gather = nn.Sequential()
if extent == 0:
assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params'
self.gather.add_module(
'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True))
if norm_layer:
self.gather.add_module(f'norm1', nn.BatchNorm2d(channels))
else:
assert extent % 2 == 0
num_conv = int(math.log2(extent))
for i in range(num_conv):
self.gather.add_module(
f'conv{i + 1}',
create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True))
if norm_layer:
self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels))
if i != num_conv - 1:
self.gather.add_module(f'act{i + 1}', act_layer(inplace=True))
else:
self.gather = None
if self.extent == 0:
self.gk = 0
self.gs = 0
else:
assert extent % 2 == 0
self.gk = self.extent * 2 - 1
self.gs = self.extent
if not rd_channels:
rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity()
self.gate = create_act_layer(gate_layer)
def forward(self, x):
size = x.shape[-2:]
if self.gather is not None:
x_ge = self.gather(x)
else:
if self.extent == 0:
# global extent
x_ge = x.mean(dim=(2, 3), keepdims=True)
if self.add_maxpool:
# experimental codepath, may remove or change
x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True)
else:
x_ge = F.avg_pool2d(
x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False)
if self.add_maxpool:
# experimental codepath, may remove or change
x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2)
x_ge = self.mlp(x_ge)
if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1:
x_ge = F.interpolate(x_ge, size=size)
return x * self.gate(x_ge)
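# Illustrative usage sketch (not part of the original file): GatherExcite as a drop-in channel
# attention block. extent=0 gathers globally (SE-like), extent=2 gathers with a strided pool.
# The input shape below is an assumed example.
if __name__ == '__main__':
    import torch

    x = torch.randn(2, 64, 32, 32)
    ge_global = GatherExcite(64, extent=0)   # parameter-free global gather
    ge_local = GatherExcite(64, extent=2)    # avg-pool gather, kernel 3 / stride 2, then upsample
    print(ge_global(x).shape, ge_local(x).shape)  # both torch.Size([2, 64, 32, 32])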
|
pytorch-image-models/timm/layers/gather_excite.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/gather_excite.py",
"repo_id": "pytorch-image-models",
"token_count": 1956
}
| 207
|
""" Bilinear-Attention-Transform and Non-Local Attention
Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms`
- https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html
Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification
"""
import torch
from torch import nn
from torch.nn import functional as F
from .conv_bn_act import ConvNormAct
from .helpers import make_divisible
from .trace_utils import _assert
class NonLocalAttn(nn.Module):
"""Spatial NL block for image classification.
This was adapted from https://github.com/BA-Transform/BAT-Image-Classification
    Their NonLocal impl was inspired by https://github.com/facebookresearch/video-nonlocal-net.
"""
def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs):
super(NonLocalAttn, self).__init__()
if rd_channels is None:
rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
self.scale = in_channels ** -0.5 if use_scale else 1.0
self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True)
self.norm = nn.BatchNorm2d(in_channels)
self.reset_parameters()
def forward(self, x):
shortcut = x
t = self.t(x)
p = self.p(x)
g = self.g(x)
B, C, H, W = t.size()
t = t.view(B, C, -1).permute(0, 2, 1)
p = p.view(B, C, -1)
g = g.view(B, C, -1).permute(0, 2, 1)
att = torch.bmm(t, p) * self.scale
att = F.softmax(att, dim=2)
x = torch.bmm(att, g)
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.z(x)
x = self.norm(x) + shortcut
return x
def reset_parameters(self):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
if len(list(m.parameters())) > 1:
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 0)
nn.init.constant_(m.bias, 0)
class BilinearAttnTransform(nn.Module):
def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super(BilinearAttnTransform, self).__init__()
self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer)
self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1))
self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size))
self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.block_size = block_size
self.groups = groups
self.in_channels = in_channels
def resize_mat(self, x, t: int):
B, C, block_size, block_size1 = x.shape
_assert(block_size == block_size1, '')
if t <= 1:
return x
x = x.view(B * C, -1, 1, 1)
x = x * torch.eye(t, t, dtype=x.dtype, device=x.device)
x = x.view(B * C, block_size, block_size, t, t)
x = torch.cat(torch.split(x, 1, dim=1), dim=3)
x = torch.cat(torch.split(x, 1, dim=2), dim=4)
x = x.view(B, C, block_size * t, block_size * t)
return x
def forward(self, x):
_assert(x.shape[-1] % self.block_size == 0, '')
_assert(x.shape[-2] % self.block_size == 0, '')
B, C, H, W = x.shape
out = self.conv1(x)
rp = F.adaptive_max_pool2d(out, (self.block_size, 1))
cp = F.adaptive_max_pool2d(out, (1, self.block_size))
p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid()
q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid()
p = p / p.sum(dim=3, keepdim=True)
q = q / q.sum(dim=2, keepdim=True)
p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(
0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
p = p.view(B, C, self.block_size, self.block_size)
q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(
0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
q = q.view(B, C, self.block_size, self.block_size)
p = self.resize_mat(p, H // self.block_size)
q = self.resize_mat(q, W // self.block_size)
y = p.matmul(x)
y = y.matmul(q)
y = self.conv2(y)
return y
class BatNonLocalAttn(nn.Module):
""" BAT
Adapted from: https://github.com/BA-Transform/BAT-Image-Classification
"""
def __init__(
self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8,
drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_):
super().__init__()
if rd_channels is None:
rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer)
self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.dropout = nn.Dropout2d(p=drop_rate)
def forward(self, x):
xl = self.conv1(x)
y = self.ba(xl)
y = self.conv2(y)
y = self.dropout(y)
return y + x
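# Illustrative usage sketch (not part of the original file): both blocks preserve the input shape,
# so they can be dropped into a residual stage. Sizes below are assumed; BilinearAttnTransform
# requires H and W to be divisible by block_size.
if __name__ == '__main__':
    x = torch.randn(2, 64, 28, 28)
    nl = NonLocalAttn(64)
    bat = BatNonLocalAttn(64, block_size=7, groups=2)
    print(nl(x).shape, bat(x).shape)  # both torch.Size([2, 64, 28, 28])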
|
pytorch-image-models/timm/layers/non_local_attn.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/non_local_attn.py",
"repo_id": "pytorch-image-models",
"token_count": 3028
}
| 208
|
""" Convolution with Weight Standardization (StdConv and ScaledStdConv)
StdConv:
@article{weightstandardization,
author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille},
title = {Weight Standardization},
journal = {arXiv preprint arXiv:1903.10520},
year = {2019},
}
Code: https://github.com/joe-siyuan-qiao/WeightStandardization
ScaledStdConv:
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
Hacked together by / copyright Ross Wightman, 2021.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .padding import get_padding, get_padding_value, pad_same
class StdConv2d(nn.Conv2d):
"""Conv2d with Weight Standardization. Used for BiT ResNet-V2 models.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(
self, in_channel, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=False, eps=1e-6):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channel, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class StdConv2dSame(nn.Conv2d):
"""Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(
self, in_channel, out_channels, kernel_size, stride=1, padding='SAME',
dilation=1, groups=1, bias=False, eps=1e-6):
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
super().__init__(
in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.same_pad = is_dynamic
self.eps = eps
def forward(self, x):
if self.same_pad:
x = pad_same(x, self.kernel_size, self.stride, self.dilation)
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class ScaledStdConv2d(nn.Conv2d):
"""Conv2d layer with Scaled Weight Standardization.
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
https://arxiv.org/abs/2101.08692
NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
weight=(self.gain * self.scale).view(-1),
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class ScaledStdConv2dSame(nn.Conv2d):
"""Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
https://arxiv.org/abs/2101.08692
NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, padding='SAME',
dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
super().__init__(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
self.scale = gamma * self.weight[0].numel() ** -0.5
self.same_pad = is_dynamic
self.eps = eps
def forward(self, x):
if self.same_pad:
x = pad_same(x, self.kernel_size, self.stride, self.dilation)
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
weight=(self.gain * self.scale).view(-1),
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
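# Illustrative usage sketch (not part of the original file): both layers are drop-in Conv2d
# replacements; weights are standardized on the fly in forward(). Shapes are assumed examples.
if __name__ == '__main__':
    x = torch.randn(2, 16, 8, 8)
    conv = StdConv2d(16, 32, kernel_size=3)              # padding inferred as 1
    nf_conv = ScaledStdConv2d(16, 32, kernel_size=3)     # adds a learnable per-filter gain
    print(conv(x).shape, nf_conv(x).shape)               # both torch.Size([2, 32, 8, 8])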
|
pytorch-image-models/timm/layers/std_conv.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/std_conv.py",
"repo_id": "pytorch-image-models",
"token_count": 2483
}
| 209
|
""" PyTorch FX Based Feature Extraction Helpers
Using https://pytorch.org/vision/stable/feature_extraction.html
"""
from typing import Callable, Dict, List, Optional, Union, Tuple, Type
import torch
from torch import nn
from ._features import _get_feature_info, _get_return_layers
try:
# NOTE we wrap torchvision fns to use timm leaf / no trace definitions
from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
# Layers we want to treat as leaf modules
from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame, Format
from timm.layers import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
from timm.layers.non_local_attn import BilinearAttnTransform
from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
from timm.layers.norm_act import (
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d
)
__all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules',
'register_notrace_function', 'is_notrace_function', 'get_notrace_functions',
'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet']
# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here
# BUT modules from timm.models should use the registration mechanism below
_leaf_modules = {
BilinearAttnTransform, # reason: flow control t <= 1
# Reason: get_same_padding has a max which raises a control flow error
Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
CondConv2d, # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]),
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d,
}
try:
from timm.layers import InplaceAbn
_leaf_modules.add(InplaceAbn)
except ImportError:
pass
def register_notrace_module(module: Type[nn.Module]):
"""
Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
"""
_leaf_modules.add(module)
return module
def is_notrace_module(module: Type[nn.Module]):
return module in _leaf_modules
def get_notrace_modules():
return list(_leaf_modules)
# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = {
resample_abs_pos_embed,
resample_abs_pos_embed_nhwc,
}
def register_notrace_function(func: Callable):
"""
Decorator for functions which ought not to be traced through
"""
_autowrap_functions.add(func)
return func
def is_notrace_function(func: Callable):
return func in _autowrap_functions
def get_notrace_functions():
return list(_autowrap_functions)
def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]:
return _get_graph_node_names(
model,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
)
def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
return _create_feature_extractor(
model, return_nodes,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
)
class FeatureGraphNet(nn.Module):
""" A FX Graph based feature extractor that works with the model feature_info metadata
"""
return_dict: torch.jit.Final[bool]
def __init__(
self,
model: nn.Module,
out_indices: Tuple[int, ...],
out_map: Optional[Dict] = None,
output_fmt: str = 'NCHW',
return_dict: bool = False,
):
super().__init__()
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
self.feature_info = _get_feature_info(model, out_indices)
if out_map is not None:
assert len(out_map) == len(out_indices)
self.output_fmt = Format(output_fmt)
return_nodes = _get_return_layers(self.feature_info, out_map)
self.graph_module = create_feature_extractor(model, return_nodes)
self.return_dict = return_dict
def forward(self, x):
out = self.graph_module(x)
if self.return_dict:
return out
return list(out.values())
class GraphExtractNet(nn.Module):
""" A standalone feature extraction wrapper that maps dict -> list or single tensor
NOTE:
* one can use feature_extractor directly if dictionary output is desired
* unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info
metadata for builtin feature extraction mode
* create_feature_extractor can be used directly if dictionary output is acceptable
Args:
model: model to extract features from
return_nodes: node names to return features from (dict or list)
squeeze_out: if only one output, and output in list format, flatten to single tensor
return_dict: return as dictionary from extractor with node names as keys, ignores squeeze_out arg
"""
return_dict: torch.jit.Final[bool]
def __init__(
self,
model: nn.Module,
return_nodes: Union[Dict[str, str], List[str]],
squeeze_out: bool = True,
return_dict: bool = False,
):
super().__init__()
self.squeeze_out = squeeze_out
self.graph_module = create_feature_extractor(model, return_nodes)
self.return_dict = return_dict
def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]:
out = self.graph_module(x)
if self.return_dict:
return out
out = list(out.values())
return out[0] if self.squeeze_out and len(out) == 1 else out
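# Illustrative usage sketch (not part of the original file): extracting intermediate features from
# a torchvision ResNet with the wrappers above. Node names and shapes are assumed examples;
# get_graph_node_names() lists the valid names for any given model.
if __name__ == '__main__':
    from torchvision.models import resnet18

    model = resnet18()
    train_nodes, eval_nodes = get_graph_node_names(model)
    print(eval_nodes[:3])  # e.g. ['x', 'conv1', 'bn1']
    extractor = GraphExtractNet(model, return_nodes=['layer2', 'layer3'], squeeze_out=False)
    feats = extractor(torch.randn(1, 3, 224, 224))
    print([f.shape for f in feats])  # e.g. [1, 128, 28, 28] and [1, 256, 14, 14]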
|
pytorch-image-models/timm/models/_features_fx.py/0
|
{
"file_path": "pytorch-image-models/timm/models/_features_fx.py",
"repo_id": "pytorch-image-models",
"token_count": 2402
}
| 210
|
"""
CoaT architecture.
Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399
Official CoaT code at: https://github.com/mlpc-ucsd/CoaT
Modified from timm/models/vision_transformer.py
"""
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['CoaT']
class ConvRelPosEnc(nn.Module):
""" Convolutional relative position encoding. """
def __init__(self, head_chs, num_heads, window):
"""
Initialization.
            head_chs: Channels per attention head.
            num_heads: Number of attention heads.
            window: Window size(s) in convolutional relative positional encoding. It can have two forms:
                1. An integer window size, which assigns all attention heads the same window size
                   in ConvRelPosEnc.
                2. A dict mapping window size to #attention head splits
                   (e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}).
                   It will apply different window sizes to the attention head splits.
"""
super().__init__()
if isinstance(window, int):
# Set the same window size for all attention heads.
window = {window: num_heads}
self.window = window
elif isinstance(window, dict):
self.window = window
else:
raise ValueError()
self.conv_list = nn.ModuleList()
self.head_splits = []
for cur_window, cur_head_split in window.items():
dilation = 1
# Determine padding size.
# Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338
padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2
cur_conv = nn.Conv2d(
cur_head_split * head_chs,
cur_head_split * head_chs,
kernel_size=(cur_window, cur_window),
padding=(padding_size, padding_size),
dilation=(dilation, dilation),
groups=cur_head_split * head_chs,
)
self.conv_list.append(cur_conv)
self.head_splits.append(cur_head_split)
self.channel_splits = [x * head_chs for x in self.head_splits]
def forward(self, q, v, size: Tuple[int, int]):
B, num_heads, N, C = q.shape
H, W = size
_assert(N == 1 + H * W, '')
# Convolutional relative position encoding.
q_img = q[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W)
v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels
conv_v_img_list = []
for i, conv in enumerate(self.conv_list):
conv_v_img_list.append(conv(v_img_list[i]))
conv_v_img = torch.cat(conv_v_img_list, dim=1)
conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2)
EV_hat = q_img * conv_v_img
EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch].
return EV_hat
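# Illustrative sketch (not part of the original file): the dict form of `window` described above,
# matching CoaT's default {3: 2, 5: 3, 7: 3} split of 8 heads across 3x3 / 5x5 / 7x7 depth-wise convs.
# Tensor shapes are assumed examples ([B, heads, 1 + H*W, Ch], CLS token included).
if __name__ == '__main__':
    crpe = ConvRelPosEnc(head_chs=8, num_heads=8, window={3: 2, 5: 3, 7: 3})
    q = torch.randn(2, 8, 1 + 14 * 14, 8)
    v = torch.randn(2, 8, 1 + 14 * 14, 8)
    print(crpe(q, v, size=(14, 14)).shape)  # torch.Size([2, 8, 197, 8])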
class FactorAttnConvRelPosEnc(nn.Module):
""" Factorized attention with convolutional relative position encoding class. """
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
shared_crpe=None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used.
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
# Shared convolutional relative position encoding.
self.crpe = shared_crpe
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
# Generate Q, K, V.
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # [B, h, N, Ch]
# Factorized attention.
k_softmax = k.softmax(dim=2)
factor_att = k_softmax.transpose(-1, -2) @ v
factor_att = q @ factor_att
# Convolutional relative position encoding.
crpe = self.crpe(q, v, size=size) # [B, h, N, Ch]
# Merge and reshape.
x = self.scale * factor_att + crpe
x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C]
# Output projection.
x = self.proj(x)
x = self.proj_drop(x)
return x
class ConvPosEnc(nn.Module):
""" Convolutional Position Encoding.
Note: This module is similar to the conditional position encoding in CPVT.
"""
def __init__(self, dim, k=3):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim)
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
H, W = size
_assert(N == 1 + H * W, '')
# Extract CLS token and image tokens.
cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C]
# Depthwise convolution.
feat = img_tokens.transpose(1, 2).view(B, C, H, W)
x = self.proj(feat) + feat
x = x.flatten(2).transpose(1, 2)
# Combine with CLS token.
x = torch.cat((cls_token, x), dim=1)
return x
class SerialBlock(nn.Module):
""" Serial block class.
    Note: In this implementation, each serial block only contains a conv-attention and an FFN (MLP) module. """
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_cpe=None,
shared_crpe=None,
):
super().__init__()
# Conv-Attention.
self.cpe = shared_cpe
self.norm1 = norm_layer(dim)
self.factoratt_crpe = FactorAttnConvRelPosEnc(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpe,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x, size: Tuple[int, int]):
# Conv-Attention.
x = self.cpe(x, size)
cur = self.norm1(x)
cur = self.factoratt_crpe(cur, size)
x = x + self.drop_path(cur)
# MLP.
cur = self.norm2(x)
cur = self.mlp(cur)
x = x + self.drop_path(cur)
return x
class ParallelBlock(nn.Module):
""" Parallel block class. """
def __init__(
self,
dims,
num_heads,
mlp_ratios=[],
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_crpes=None,
):
super().__init__()
# Conv-Attention.
self.norm12 = norm_layer(dims[1])
self.norm13 = norm_layer(dims[2])
self.norm14 = norm_layer(dims[3])
self.factoratt_crpe2 = FactorAttnConvRelPosEnc(
dims[1],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[1],
)
self.factoratt_crpe3 = FactorAttnConvRelPosEnc(
dims[2],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[2],
)
self.factoratt_crpe4 = FactorAttnConvRelPosEnc(
dims[3],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[3],
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm22 = norm_layer(dims[1])
self.norm23 = norm_layer(dims[2])
self.norm24 = norm_layer(dims[3])
# In parallel block, we assume dimensions are the same and share the linear transformation.
assert dims[1] == dims[2] == dims[3]
assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3]
mlp_hidden_dim = int(dims[1] * mlp_ratios[1])
self.mlp2 = self.mlp3 = self.mlp4 = Mlp(
in_features=dims[1],
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def upsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map up-sampling. """
return self.interpolate(x, scale_factor=factor, size=size)
def downsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map down-sampling. """
return self.interpolate(x, scale_factor=1.0/factor, size=size)
def interpolate(self, x, scale_factor: float, size: Tuple[int, int]):
""" Feature map interpolation. """
B, N, C = x.shape
H, W = size
_assert(N == 1 + H * W, '')
cls_token = x[:, :1, :]
img_tokens = x[:, 1:, :]
img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W)
img_tokens = F.interpolate(
img_tokens,
scale_factor=scale_factor,
recompute_scale_factor=False,
mode='bilinear',
align_corners=False,
)
img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2)
out = torch.cat((cls_token, img_tokens), dim=1)
return out
def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]):
_, S2, S3, S4 = sizes
cur2 = self.norm12(x2)
cur3 = self.norm13(x3)
cur4 = self.norm14(x4)
cur2 = self.factoratt_crpe2(cur2, size=S2)
cur3 = self.factoratt_crpe3(cur3, size=S3)
cur4 = self.factoratt_crpe4(cur4, size=S4)
upsample3_2 = self.upsample(cur3, factor=2., size=S3)
upsample4_3 = self.upsample(cur4, factor=2., size=S4)
upsample4_2 = self.upsample(cur4, factor=4., size=S4)
downsample2_3 = self.downsample(cur2, factor=2., size=S2)
downsample3_4 = self.downsample(cur3, factor=2., size=S3)
downsample2_4 = self.downsample(cur2, factor=4., size=S2)
cur2 = cur2 + upsample3_2 + upsample4_2
cur3 = cur3 + upsample4_3 + downsample2_3
cur4 = cur4 + downsample3_4 + downsample2_4
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
# MLP.
cur2 = self.norm22(x2)
cur3 = self.norm23(x3)
cur4 = self.norm24(x4)
cur2 = self.mlp2(cur2)
cur3 = self.mlp3(cur3)
cur4 = self.mlp4(cur4)
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
return x1, x2, x3, x4
class CoaT(nn.Module):
""" CoaT class. """
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dims=(64, 128, 320, 512),
serial_depths=(3, 4, 6, 3),
parallel_depth=0,
num_heads=8,
mlp_ratios=(4, 4, 4, 4),
qkv_bias=True,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm,
return_interm_layers=False,
out_features=None,
crpe_window=None,
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
crpe_window = crpe_window or {3: 2, 5: 3, 7: 3}
self.return_interm_layers = return_interm_layers
self.out_features = out_features
self.embed_dims = embed_dims
self.num_features = self.head_hidden_size = embed_dims[-1]
self.num_classes = num_classes
self.global_pool = global_pool
# Patch embeddings.
img_size = to_2tuple(img_size)
self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dims[0], norm_layer=nn.LayerNorm)
self.patch_embed2 = PatchEmbed(
img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1], norm_layer=nn.LayerNorm)
self.patch_embed3 = PatchEmbed(
img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2], norm_layer=nn.LayerNorm)
self.patch_embed4 = PatchEmbed(
img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3], norm_layer=nn.LayerNorm)
# Class tokens.
self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0]))
self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1]))
self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2]))
self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))
# Convolutional position encodings.
self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3)
self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3)
self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3)
self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3)
# Convolutional relative position encodings.
self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window)
# Disable stochastic depth.
dpr = drop_path_rate
assert dpr == 0.0
skwargs = dict(
num_heads=num_heads,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr,
norm_layer=norm_layer,
)
# Serial blocks 1.
self.serial_blocks1 = nn.ModuleList([
SerialBlock(
dim=embed_dims[0],
mlp_ratio=mlp_ratios[0],
shared_cpe=self.cpe1,
shared_crpe=self.crpe1,
**skwargs,
)
for _ in range(serial_depths[0])]
)
# Serial blocks 2.
self.serial_blocks2 = nn.ModuleList([
SerialBlock(
dim=embed_dims[1],
mlp_ratio=mlp_ratios[1],
shared_cpe=self.cpe2,
shared_crpe=self.crpe2,
**skwargs,
)
for _ in range(serial_depths[1])]
)
# Serial blocks 3.
self.serial_blocks3 = nn.ModuleList([
SerialBlock(
dim=embed_dims[2],
mlp_ratio=mlp_ratios[2],
shared_cpe=self.cpe3,
shared_crpe=self.crpe3,
**skwargs,
)
for _ in range(serial_depths[2])]
)
# Serial blocks 4.
self.serial_blocks4 = nn.ModuleList([
SerialBlock(
dim=embed_dims[3],
mlp_ratio=mlp_ratios[3],
shared_cpe=self.cpe4,
shared_crpe=self.crpe4,
**skwargs,
)
for _ in range(serial_depths[3])]
)
# Parallel blocks.
self.parallel_depth = parallel_depth
if self.parallel_depth > 0:
self.parallel_blocks = nn.ModuleList([
ParallelBlock(
dims=embed_dims,
mlp_ratios=mlp_ratios,
shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4),
**skwargs,
)
for _ in range(parallel_depth)]
)
else:
self.parallel_blocks = None
# Classification head(s).
if not self.return_interm_layers:
if self.parallel_blocks is not None:
self.norm2 = norm_layer(embed_dims[1])
self.norm3 = norm_layer(embed_dims[2])
else:
self.norm2 = self.norm3 = None
self.norm4 = norm_layer(embed_dims[3])
if self.parallel_depth > 0:
# CoaT series: Aggregate features of last three scales for classification.
assert embed_dims[1] == embed_dims[2] == embed_dims[3]
self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
else:
# CoaT-Lite series: Use feature of last scale for classification.
self.aggregate = None
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Initialize weights.
trunc_normal_(self.cls_token1, std=.02)
trunc_normal_(self.cls_token2, std=.02)
trunc_normal_(self.cls_token3, std=.02)
trunc_normal_(self.cls_token4, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'}
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem1=r'^cls_token1|patch_embed1|crpe1|cpe1',
serial_blocks1=r'^serial_blocks1\.(\d+)',
stem2=r'^cls_token2|patch_embed2|crpe2|cpe2',
serial_blocks2=r'^serial_blocks2\.(\d+)',
stem3=r'^cls_token3|patch_embed3|crpe3|cpe3',
serial_blocks3=r'^serial_blocks3\.(\d+)',
stem4=r'^cls_token4|patch_embed4|crpe4|cpe4',
serial_blocks4=r'^serial_blocks4\.(\d+)',
parallel_blocks=[ # FIXME (partially?) overlap parallel w/ serial blocks??
(r'^parallel_blocks\.(\d+)', None),
(r'^norm|aggregate', (99999,)),
]
)
return matcher
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x0):
B = x0.shape[0]
# Serial blocks 1.
x1 = self.patch_embed1(x0)
H1, W1 = self.patch_embed1.grid_size
x1 = insert_cls(x1, self.cls_token1)
for blk in self.serial_blocks1:
x1 = blk(x1, size=(H1, W1))
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 2.
x2 = self.patch_embed2(x1_nocls)
H2, W2 = self.patch_embed2.grid_size
x2 = insert_cls(x2, self.cls_token2)
for blk in self.serial_blocks2:
x2 = blk(x2, size=(H2, W2))
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 3.
x3 = self.patch_embed3(x2_nocls)
H3, W3 = self.patch_embed3.grid_size
x3 = insert_cls(x3, self.cls_token3)
for blk in self.serial_blocks3:
x3 = blk(x3, size=(H3, W3))
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 4.
x4 = self.patch_embed4(x3_nocls)
H4, W4 = self.patch_embed4.grid_size
x4 = insert_cls(x4, self.cls_token4)
for blk in self.serial_blocks4:
x4 = blk(x4, size=(H4, W4))
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
# Only serial blocks: Early return.
if self.parallel_blocks is None:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
# Return features for classification.
x4 = self.norm4(x4)
return x4
# Parallel blocks.
for blk in self.parallel_blocks:
x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4))
x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)])
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
x2 = self.norm2(x2)
x3 = self.norm3(x3)
x4 = self.norm4(x4)
return [x2, x3, x4]
def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False):
if isinstance(x_feat, list):
assert self.aggregate is not None
if self.global_pool == 'avg':
x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C]
else:
x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C]
x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C]
else:
x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x) -> torch.Tensor:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features (for down-stream tasks).
return self.forward_features(x)
else:
# Return features for classification.
x_feat = self.forward_features(x)
x = self.forward_head(x_feat)
return x
def insert_cls(x, cls_token):
""" Insert CLS token. """
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
return x
def remove_cls(x):
""" Remove CLS token. """
return x[:, 1:, :]
def checkpoint_filter_fn(state_dict, model):
out_dict = {}
state_dict = state_dict.get('model', state_dict)
for k, v in state_dict.items():
# original model had unused norm layers, removing them requires filtering pretrained checkpoints
if k.startswith('norm1') or \
(k.startswith('norm2') and getattr(model, 'norm2', None) is None) or \
(k.startswith('norm3') and getattr(model, 'norm3', None) is None) or \
(k.startswith('norm4') and getattr(model, 'norm4', None) is None) or \
(k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or \
(k.startswith('head') and getattr(model, 'head', None) is None):
continue
out_dict[k] = v
return out_dict
def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
CoaT,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
def _cfg_coat(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed1.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium_384.in1k': _cfg_coat(
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash',
),
})
@register_model
def coat_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
        patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
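# Illustrative usage sketch (not part of the original file): instantiating a registered CoaT-Lite
# variant directly and running a forward pass with randomly initialized weights.
if __name__ == '__main__':
    model = coat_lite_tiny(pretrained=False)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)               # torch.Size([1, 1000])
    tokens = model.forward_features(x)  # CoaT-Lite: final-stage tokens incl. the CLS token
    print(tokens.shape)                 # torch.Size([1, 50, 320]) for the 7x7 final grid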
|
pytorch-image-models/timm/models/coat.py/0
|
{
"file_path": "pytorch-image-models/timm/models/coat.py",
"repo_id": "pytorch-image-models",
"token_count": 15701
}
| 211
|
""" EfficientViT (by MSRA)
Paper: `EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention`
- https://arxiv.org/abs/2305.07027
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/EfficientViT
"""
__all__ = ['EfficientVitMsra']
import itertools
from collections import OrderedDict
from typing import Dict, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps)**0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps)**0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class NormLinear(torch.nn.Sequential):
def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.):
super().__init__()
self.bn = nn.BatchNorm1d(in_features)
self.drop = nn.Dropout(drop)
self.linear = nn.Linear(in_features, out_features, bias=bias)
trunc_normal_(self.linear.weight, std=std)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
@torch.no_grad()
def fuse(self):
bn, linear = self.bn, self.linear
w = bn.weight / (bn.running_var + bn.eps)**0.5
b = bn.bias - self.bn.running_mean * \
self.bn.weight / (bn.running_var + bn.eps)**0.5
w = linear.weight * w[None, :]
if linear.bias is None:
b = b @ self.linear.weight.T
else:
b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchMerging(torch.nn.Module):
def __init__(self, dim, out_dim):
super().__init__()
hid_dim = int(dim * 4)
self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0)
self.act = torch.nn.ReLU()
self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim)
self.se = SqueezeExcite(hid_dim, .25)
self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x))))))
return x
class ResidualDrop(torch.nn.Module):
def __init__(self, m, drop=0.):
super().__init__()
self.m = m
self.drop = drop
def forward(self, x):
if self.training and self.drop > 0:
return x + self.m(x) * torch.rand(
x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach()
else:
return x + self.m(x)
class ConvMlp(torch.nn.Module):
def __init__(self, ed, h):
super().__init__()
self.pw1 = ConvNorm(ed, h)
self.act = torch.nn.ReLU()
self.pw2 = ConvNorm(h, ed, bn_weight_init=0)
def forward(self, x):
x = self.pw2(self.act(self.pw1(x)))
return x
class CascadedGroupAttention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
r""" Cascaded Group Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier on the query/key dim used for the value dimension.
        resolution (int): Input resolution, corresponding to the window size.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.attn_ratio = attn_ratio
qkvs = []
dws = []
for i in range(num_heads):
qkvs.append(ConvNorm(dim // (num_heads), self.key_dim * 2 + self.val_dim))
dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim))
self.qkvs = torch.nn.ModuleList(qkvs)
self.dws = torch.nn.ModuleList(dws)
self.proj = torch.nn.Sequential(
torch.nn.ReLU(),
ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0)
)
points = list(itertools.product(range(resolution), range(resolution)))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
B, C, H, W = x.shape
feats_in = x.chunk(len(self.qkvs), dim=1)
feats_out = []
feat = feats_in[0]
attn_bias = self.get_attention_biases(x.device)
for head_idx, (qkv, dws) in enumerate(zip(self.qkvs, self.dws)):
if head_idx > 0:
feat = feat + feats_in[head_idx]
feat = qkv(feat)
q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1)
q = dws(q)
q, k, v = q.flatten(2), k.flatten(2), v.flatten(2)
q = q * self.scale
attn = q.transpose(-2, -1) @ k
attn = attn + attn_bias[head_idx]
attn = attn.softmax(dim=-1)
feat = v @ attn.transpose(-2, -1)
feat = feat.view(B, self.val_dim, H, W)
feats_out.append(feat)
x = self.proj(torch.cat(feats_out, 1))
return x
class LocalWindowAttention(torch.nn.Module):
r""" Local Window Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier on the query/key dim used for the value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.resolution = resolution
assert window_resolution > 0, 'window_size must be greater than 0'
self.window_resolution = window_resolution
window_resolution = min(window_resolution, resolution)
self.attn = CascadedGroupAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=window_resolution,
kernels=kernels,
)
def forward(self, x):
H = W = self.resolution
B, C, H_, W_ = x.shape
        # Only check this for classification models
_assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
_assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
if H <= self.window_resolution and W <= self.window_resolution:
x = self.attn(x)
else:
x = x.permute(0, 2, 3, 1)
pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution
pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution
x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b))
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_resolution
nW = pW // self.window_resolution
# window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw
x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3)
x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2)
x = self.attn(x)
# window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC
x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C)
x = x.transpose(2, 3).reshape(B, pH, pW, C)
x = x[:, :H, :W].contiguous()
x = x.permute(0, 3, 1, 2)
return x
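# Worked padding example (editor's note, not in the original code): with resolution=14 and
# window_resolution=7 no padding is needed (pad_b = pad_r = 0) and the 14x14 map splits into
# 2x2 = 4 windows of 7x7; with resolution=12 the map would be padded to 14x14 before attention
# and cropped back to 12x12 afterwards.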
class EfficientVitBlock(torch.nn.Module):
""" A basic EfficientVit building block.
Args:
dim (int): Number of input channels.
key_dim (int): Dimension for query and key in the token mixer.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier applied to the key dim to set the value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
):
super().__init__()
self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
self.mixer = ResidualDrop(
LocalWindowAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=resolution,
window_resolution=window_resolution,
kernels=kernels,
)
)
self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
def forward(self, x):
return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x)))))
class EfficientVitStage(torch.nn.Module):
def __init__(
self,
in_dim,
out_dim,
key_dim,
downsample=('', 1),
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
depth=1,
):
super().__init__()
if downsample[0] == 'subsample':
self.resolution = (resolution - 1) // downsample[1] + 1
down_blocks = []
down_blocks.append((
'res1',
torch.nn.Sequential(
ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)),
ResidualDrop(ConvMlp(in_dim, int(in_dim * 2))),
)
))
down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim)))
down_blocks.append((
'res2',
torch.nn.Sequential(
ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)),
ResidualDrop(ConvMlp(out_dim, int(out_dim * 2))),
)
))
self.downsample = nn.Sequential(OrderedDict(down_blocks))
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
self.resolution = resolution
blocks = []
for d in range(depth):
blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class PatchEmbedding(torch.nn.Sequential):
def __init__(self, in_chans, dim):
super().__init__()
self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1))
self.add_module('relu1', torch.nn.ReLU())
self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1))
self.add_module('relu2', torch.nn.ReLU())
self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1))
self.add_module('relu3', torch.nn.ReLU())
self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1))
self.patch_size = 16
class EfficientVitMsra(nn.Module):
def __init__(
self,
img_size=224,
in_chans=3,
num_classes=1000,
embed_dim=(64, 128, 192),
key_dim=(16, 16, 16),
depth=(1, 2, 3),
num_heads=(4, 4, 4),
window_size=(7, 7, 7),
kernels=(5, 5, 5, 5),
down_ops=(('', 1), ('subsample', 2), ('subsample', 2)),
global_pool='avg',
drop_rate=0.,
):
super(EfficientVitMsra, self).__init__()
self.grad_checkpointing = False
self.num_classes = num_classes
self.drop_rate = drop_rate
# Patch embedding
self.patch_embed = PatchEmbedding(in_chans, embed_dim[0])
stride = self.patch_embed.patch_size
resolution = img_size // self.patch_embed.patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
# Build EfficientVit blocks
self.feature_info = []
stages = []
pre_ed = embed_dim[0]
for i, (ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
stage = EfficientVitStage(
in_dim=pre_ed,
out_dim=ed,
key_dim=kd,
downsample=do,
num_heads=nh,
attn_ratio=ar,
resolution=resolution,
window_resolution=wd,
kernels=kernels,
depth=dpth,
)
pre_ed = ed
if do[0] == 'subsample' and i != 0:
stride *= do[1]
resolution = stage.resolution
stages.append(stage)
self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.num_features = self.head_hidden_size = embed_dim[-1]
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.linear
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# def checkpoint_filter_fn(state_dict, model):
# if 'model' in state_dict.keys():
# state_dict = state_dict['model']
# tmp_dict = {}
# out_dict = {}
# target_keys = model.state_dict().keys()
# target_keys = [k for k in target_keys if k.startswith('stages.')]
#
# for k, v in state_dict.items():
# if 'attention_bias_idxs' in k:
# continue
# k = k.split('.')
# if k[-2] == 'c':
# k[-2] = 'conv'
# if k[-2] == 'l':
# k[-2] = 'linear'
# k = '.'.join(k)
# tmp_dict[k] = v
#
# for k, v in tmp_dict.items():
# if k.startswith('patch_embed'):
# k = k.split('.')
# k[1] = 'conv' + str(int(k[1]) // 2 + 1)
# k = '.'.join(k)
# elif k.startswith('blocks'):
# kw = '.'.join(k.split('.')[2:])
# find_kw = [a for a in list(sorted(tmp_dict.keys())) if kw in a]
# idx = find_kw.index(k)
# k = [a for a in target_keys if kw in a][idx]
# out_dict[k] = v
#
# return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.linear',
'fixed_input_size': True,
'pool_size': (4, 4),
**kwargs,
}
default_cfgs = generate_default_cfgs({
'efficientvit_m0.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m0.pth'
),
'efficientvit_m1.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m1.pth'
),
'efficientvit_m2.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m2.pth'
),
'efficientvit_m3.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m3.pth'
),
'efficientvit_m4.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m4.pth'
),
'efficientvit_m5.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m5.pth'
),
})
def _create_efficientvit_msra(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2))
model = build_model_with_cfg(
EfficientVitMsra,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def efficientvit_m0(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[64, 128, 192],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m1(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 144, 192],
depth=[1, 2, 3],
num_heads=[2, 3, 3],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m2(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 192, 224],
depth=[1, 2, 3],
num_heads=[4, 3, 2],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m3(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 240, 320],
depth=[1, 2, 3],
num_heads=[4, 3, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m4(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 256, 384],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m5(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[192, 288, 384],
depth=[1, 3, 4],
num_heads=[3, 3, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs))
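if __name__ == '__main__':
    # Editor's smoke-test sketch, not part of the original file. Because of the relative
    # imports above it must be run as a module, e.g. `python -m timm.models.efficientvit_msra`.
    model = efficientvit_m0(pretrained=False).eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 1000])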
|
pytorch-image-models/timm/models/efficientvit_msra.py/0
|
{
"file_path": "pytorch-image-models/timm/models/efficientvit_msra.py",
"repo_id": "pytorch-image-models",
"token_count": 11894
}
| 212
|
"""
InceptionNeXt paper: https://arxiv.org/abs/2303.16900
Original implementation & weights from: https://github.com/sail-sg/inceptionnext
"""
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['MetaNeXt']
class InceptionDWConv2d(nn.Module):
""" Inception depthwise convolution
"""
def __init__(
self,
in_chs,
square_kernel_size=3,
band_kernel_size=11,
branch_ratio=0.125,
dilation=1,
):
super().__init__()
gc = int(in_chs * branch_ratio) # channel numbers of a convolution branch
square_padding = get_padding(square_kernel_size, dilation=dilation)
band_padding = get_padding(band_kernel_size, dilation=dilation)
self.dwconv_hw = nn.Conv2d(
gc, gc, square_kernel_size,
padding=square_padding, dilation=dilation, groups=gc)
self.dwconv_w = nn.Conv2d(
gc, gc, (1, band_kernel_size),
padding=(0, band_padding), dilation=(1, dilation), groups=gc)
self.dwconv_h = nn.Conv2d(
gc, gc, (band_kernel_size, 1),
padding=(band_padding, 0), dilation=(dilation, 1), groups=gc)
self.split_indexes = (in_chs - 3 * gc, gc, gc, gc)
def forward(self, x):
x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1)
return torch.cat((
x_id,
self.dwconv_hw(x_hw),
self.dwconv_w(x_w),
self.dwconv_h(x_h)
), dim=1,
)
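# Editor's shape note (not part of the original implementation): with in_chs=256 and the default
# branch_ratio=0.125, gc = 32 and the input splits into (160, 32, 32, 32) channels; the first
# chunk passes through untouched while the other three go through the square, 1xk band and
# kx1 band depthwise convolutions, e.g.
#   m = InceptionDWConv2d(256)
#   y = m(torch.randn(1, 256, 56, 56))   # y.shape == (1, 256, 56, 56)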
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.ReLU,
norm_layer=None,
bias=True,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
bias = to_2tuple(bias)
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.drop = nn.Dropout(drop)
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
class MlpClassifierHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
in_features,
num_classes=1000,
pool_type='avg',
mlp_ratio=3,
act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop=0.,
bias=True
):
super().__init__()
self.use_conv = False
self.in_features = in_features
self.num_features = hidden_features = int(mlp_ratio * in_features)
assert pool_type, 'Cannot disable pooling'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.drop = nn.Dropout(drop)
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None:
assert pool_type, 'Cannot disable pooling'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.drop(x)
return x if pre_logits else self.fc2(x)
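# Editor's dimensional note (not part of the original implementation): with in_features=768 and
# the default mlp_ratio=3 the hidden width is 2304, so the head computes
# global-pool -> Linear(768, 2304) -> GELU -> LayerNorm -> Dropout -> Linear(2304, num_classes).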
class MetaNeXtBlock(nn.Module):
""" MetaNeXtBlock Block
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
dim,
dilation=1,
token_mixer=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
mlp_layer=ConvMlp,
mlp_ratio=4,
act_layer=nn.GELU,
ls_init_value=1e-6,
drop_path=0.,
):
super().__init__()
self.token_mixer = token_mixer(dim, dilation=dilation)
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.token_mixer(x)
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class MetaNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
token_mixer=InceptionDWConv2d,
act_layer=nn.GELU,
norm_layer=None,
mlp_ratio=4,
):
super().__init__()
self.grad_checkpointing = False
if stride > 1 or dilation[0] != dilation[1]:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(
in_chs,
out_chs,
kernel_size=2,
stride=stride,
dilation=dilation[0],
),
)
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(MetaNeXtBlock(
dim=out_chs,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
token_mixer=token_mixer,
act_layer=act_layer,
norm_layer=norm_layer,
mlp_ratio=mlp_ratio,
))
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class MetaNeXt(nn.Module):
r""" MetaNeXt
A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3)
dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768)
token_mixers: Token mixer function. Default: nn.Identity
norm_layer: Normalization layer. Default: nn.BatchNorm2d
act_layer: Activation function for MLP. Default: nn.GELU
mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3)
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
act_layer=nn.GELU,
mlp_ratios=(4, 4, 4, 3),
drop_rate=0.,
drop_path_rate=0.,
ls_init_value=1e-6,
):
super().__init__()
num_stage = len(depths)
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * num_stage
if not isinstance(mlp_ratios, (list, tuple)):
mlp_ratios = [mlp_ratios] * num_stage
self.num_classes = num_classes
self.global_pool = global_pool
self.drop_rate = drop_rate
self.feature_info = []
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
norm_layer(dims[0])
)
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
prev_chs = dims[0]
curr_stride = 4
dilation = 1
# feature resolution stages, each consisting of multiple residual blocks
self.stages = nn.Sequential()
for i in range(num_stage):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
self.stages.append(MetaNeXtStage(
prev_chs,
out_chs,
stride=stride if i > 0 else 1,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
act_layer=act_layer,
token_mixer=token_mixers[i],
norm_layer=norm_layer,
mlp_ratio=mlp_ratios[i],
))
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.num_features = prev_chs
self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate)
self.head_hidden_size = self.head.num_features
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
]
)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc2
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def no_weight_decay(self):
return set()
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc2',
**kwargs
}
default_cfgs = generate_default_cfgs({
'inception_next_tiny.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth',
),
'inception_next_small.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth',
),
'inception_next_base.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth',
crop_pct=0.95,
),
'inception_next_base.sail_in1k_384': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
})
def _create_inception_next(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
MetaNeXt, variant, pretrained,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def inception_next_tiny(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_small(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_base(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))
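if __name__ == '__main__':
    # Editor's usage sketch, not part of the original file; run as a module so the relative
    # imports resolve, e.g. `python -m timm.models.inception_next`.
    model = inception_next_tiny(pretrained=False).eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        feats = model.forward_features(x)   # (1, 768, 7, 7) feature map
        logits = model(x)                   # (1, 1000) classification logits
    print(feats.shape, logits.shape)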
|
pytorch-image-models/timm/models/inception_next.py/0
|
{
"file_path": "pytorch-image-models/timm/models/inception_next.py",
"repo_id": "pytorch-image-models",
"token_count": 7422
}
| 213
|
""" Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implementation of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import Optional, Sequence, Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, to_2tuple
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .vision_transformer import Block
__all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this
class SequentialTuple(nn.Sequential):
""" This module exists to work around torchscript typing issues list -> list"""
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self,
base_dim,
depth,
heads,
mlp_ratio,
pool=None,
proj_drop=.0,
attn_drop=.0,
drop_path_prob=None,
norm_layer=None,
):
super(Transformer, self).__init__()
embed_dim = base_dim * heads
self.pool = pool
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
token_length = cls_tokens.shape[1]
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
B, C, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.norm(x)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
return x, cls_tokens
class Pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(Pooling, self).__init__()
self.conv = nn.Conv2d(
in_feature,
out_feature,
kernel_size=stride + 1,
padding=stride // 2,
stride=stride,
padding_mode=padding_mode,
groups=in_feature,
)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
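# Editor's shape sketch (not part of the original implementation): the depthwise stride-2 conv
# halves the token grid while the class token is remapped by a linear layer, e.g.
#   pool = Pooling(96, 192, stride=2)
#   x, cls = pool(torch.randn(1, 96, 27, 27), torch.randn(1, 1, 96))
#   # x: (1, 192, 14, 14), cls: (1, 1, 192)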
class ConvEmbedding(nn.Module):
def __init__(
self,
in_channels,
out_channels,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
padding: int = 0,
):
super(ConvEmbedding, self).__init__()
padding = padding
self.img_size = to_2tuple(img_size)
self.patch_size = to_2tuple(patch_size)
self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1)
self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1)
self.grid_size = (self.height, self.width)
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size,
stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
class PoolingVisionTransformer(nn.Module):
""" Pooling-based Vision Transformer
    A PyTorch implementation of 'Rethinking Spatial Dimensions of Vision Transformers'
- https://arxiv.org/abs/2103.16302
"""
def __init__(
self,
img_size: int = 224,
patch_size: int = 16,
stride: int = 8,
stem_type: str = 'overlap',
base_dims: Sequence[int] = (48, 48, 48),
depth: Sequence[int] = (2, 6, 4),
heads: Sequence[int] = (2, 4, 8),
mlp_ratio: float = 4,
num_classes=1000,
in_chans=3,
global_pool='token',
distilled=False,
drop_rate=0.,
pos_drop_drate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
):
super(PoolingVisionTransformer, self).__init__()
assert global_pool in ('token',)
self.base_dims = base_dims
self.heads = heads
embed_dim = base_dims[0] * heads[0]
self.num_classes = num_classes
self.global_pool = global_pool
self.num_tokens = 2 if distilled else 1
self.feature_info = []
self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride)
self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width))
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=pos_drop_drate)
transformers = []
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
prev_dim = embed_dim
for i in range(len(depth)):
pool = None
embed_dim = base_dims[i] * heads[i]
if i > 0:
pool = Pooling(
prev_dim,
embed_dim,
stride=2,
)
transformers += [Transformer(
base_dims[i],
depth[i],
heads[i],
mlp_ratio,
pool=pool,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path_prob=dpr[i],
)]
prev_dim = embed_dim
self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim
# Classifier head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
def get_classifier(self) -> nn.Module:
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.head_dist is not None:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
x, cls_tokens = self.transformers((x, cls_tokens))
cls_tokens = self.norm(cls_tokens)
return cls_tokens
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
if self.head_dist is not None:
assert self.global_pool == 'token'
x, x_dist = x[:, 0], x[:, 1]
x = self.head_drop(x)
            x_dist = self.head_drop(x_dist)
if not pre_logits:
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
                # during standard train / finetune and at inference, average the classifier predictions
return (x + x_dist) / 2
else:
if self.global_pool == 'token':
x = x[:, 0]
x = self.head_drop(x)
if not pre_logits:
x = self.head(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" preprocess checkpoints """
out_dict = {}
p_blocks = re.compile(r'pools\.(\d)\.')
for k, v in state_dict.items():
# FIXME need to update resize for PiT impl
# if k == 'pos_embed' and v.shape != model.pos_embed.shape:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(v, model.pos_embed)
k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k)
out_dict[k] = v
return out_dict
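# Editor's note on the remap above (not part of the original file): the original PiT checkpoints
# keep the pooling layers in a separate 'pools' list, so a key such as
#   'pools.0.conv.weight' becomes 'transformers.1.pool.conv.weight'
# i.e. pool i in the checkpoint is attached to transformer stage i + 1 in this implementation.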
def _create_pit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
PoolingVisionTransformer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# deit models (FB weights)
'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_s_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_b_224.in1k': _cfg(hf_hub_id='timm/'),
'pit_ti_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_s_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
'pit_b_distilled_224.in1k': _cfg(
hf_hub_id='timm/',
classifier=('head', 'head_dist')),
})
@register_model
def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
)
return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
)
return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
)
return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs))
@register_model
def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
model_args = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
)
return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs))
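if __name__ == '__main__':
    # Editor's smoke-test sketch, not part of the original file; run as a module so the
    # relative imports resolve, e.g. `python -m timm.models.pit`.
    model = pit_ti_224(pretrained=False).eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])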
|
pytorch-image-models/timm/models/pit.py/0
|
{
"file_path": "pytorch-image-models/timm/models/pit.py",
"repo_id": "pytorch-image-models",
"token_count": 7404
}
| 214
|
""" Selective Kernel Networks (ResNet base)
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectiveKernel, ConvNormAct, create_attn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class SelectiveKernelBasic(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
sk_kwargs=None,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(SelectiveKernelBasic, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
        assert base_width == 64, 'BasicBlock does not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = SelectiveKernel(
inplanes, first_planes, stride=stride, dilation=first_dilation,
aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
self.conv2 = ConvNormAct(
first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.conv2.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
class SelectiveKernelBottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
cardinality=1,
base_width=64,
sk_kwargs=None,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(SelectiveKernelBottleneck, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
self.conv2 = SelectiveKernel(
first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.drop_path = drop_path
def zero_init_last(self):
if getattr(self.conv3.bn, 'weight', None) is not None:
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
def _create_skresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'),
'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'),
'skresnet50.untrained': _cfg(),
'skresnet50d.untrained': _cfg(
first_conv='conv1.0'),
'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def skresnet18(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Selective Kernel ResNet-18 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet18', pretrained, **model_args)
@register_model
def skresnet34(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Selective Kernel ResNet-34 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet34', pretrained, **model_args)
@register_model
def skresnet50(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNet-50 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last=False, **kwargs)
return _create_skresnet('skresnet50', pretrained, **model_args)
@register_model
def skresnet50d(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNet-50-D model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
return _create_skresnet('skresnet50d', pretrained, **model_args)
@register_model
def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet:
"""Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to
the SKNet-50 model in the Select Kernel Paper
"""
sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
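if __name__ == '__main__':
    # Editor's smoke-test sketch, not part of the original file; run as a module so the
    # relative imports resolve, e.g. `python -m timm.models.sknet`.
    import torch
    model = skresnet18(pretrained=False).eval()
    with torch.no_grad():
        print(model(torch.randn(1, 3, 224, 224)).shape)  # expected: torch.Size([1, 1000])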
|
pytorch-image-models/timm/models/sknet.py/0
|
{
"file_path": "pytorch-image-models/timm/models/sknet.py",
"repo_id": "pytorch-image-models",
"token_count": 3801
}
| 215
|
""" VoVNet (V1 & V2)
Papers:
* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730
* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
Looked at https://github.com/youngwanLEE/vovnet-detectron2 &
https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
for some reference, rewrote most of the code.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \
create_attn, create_norm_act_layer
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['VovNet'] # model_registry will add each entrypoint fn to this
class SequentialAppendList(nn.Sequential):
def __init__(self, *args):
super(SequentialAppendList, self).__init__(*args)
def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor:
for i, module in enumerate(self):
if i == 0:
concat_list.append(module(x))
else:
concat_list.append(module(concat_list[-1]))
x = torch.cat(concat_list, dim=1)
return x
class OsaBlock(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
layer_per_block,
residual=False,
depthwise=False,
attn='',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path=None,
):
super(OsaBlock, self).__init__()
self.residual = residual
self.depthwise = depthwise
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
next_in_chs = in_chs
if self.depthwise and next_in_chs != mid_chs:
assert not residual
self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs)
else:
self.conv_reduction = None
mid_convs = []
for i in range(layer_per_block):
if self.depthwise:
conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs)
else:
conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs)
next_in_chs = mid_chs
mid_convs.append(conv)
self.conv_mid = SequentialAppendList(*mid_convs)
# feature aggregation
next_in_chs = in_chs + layer_per_block * mid_chs
self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs)
self.attn = create_attn(attn, out_chs) if attn else None
self.drop_path = drop_path
def forward(self, x):
output = [x]
if self.conv_reduction is not None:
x = self.conv_reduction(x)
x = self.conv_mid(x, output)
x = self.conv_concat(x)
if self.attn is not None:
x = self.attn(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.residual:
x = x + output[0]
return x
class OsaStage(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
block_per_stage,
layer_per_block,
downsample=True,
residual=True,
depthwise=False,
attn='ese',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path_rates=None,
):
super(OsaStage, self).__init__()
self.grad_checkpointing = False
if downsample:
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
else:
self.pool = None
blocks = []
for i in range(block_per_stage):
last_block = i == block_per_stage - 1
if drop_path_rates is not None and drop_path_rates[i] > 0.:
drop_path = DropPath(drop_path_rates[i])
else:
drop_path = None
blocks += [OsaBlock(
in_chs,
mid_chs,
out_chs,
layer_per_block,
residual=residual and i > 0,
depthwise=depthwise,
attn=attn if last_block else '',
norm_layer=norm_layer,
act_layer=act_layer,
drop_path=drop_path
)]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.pool is not None:
x = self.pool(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class VovNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_rate=0.,
drop_path_rate=0.,
**kwargs,
):
"""
Args:
cfg (dict): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
global_pool (str): Global pooling type (default: 'avg')
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
norm_layer (Union[str, nn.Module]): normalization layer
act_layer (Union[str, nn.Module]): activation layer
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super(VovNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride == 32 # FIXME support dilation
cfg = dict(cfg, **kwargs)
stem_stride = cfg.get("stem_stride", 4)
stem_chs = cfg["stem_chs"]
stage_conv_chs = cfg["stage_conv_chs"]
stage_out_chs = cfg["stage_out_chs"]
block_per_stage = cfg["block_per_stage"]
layer_per_block = cfg["layer_per_block"]
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
# Stem module
last_stem_stride = stem_stride // 2
conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct
self.stem = nn.Sequential(*[
ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs),
conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs),
conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs),
])
self.feature_info = [dict(
num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')]
current_stride = stem_stride
# OSA stages
stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage)
in_ch_list = stem_chs[-1:] + stage_out_chs[:-1]
stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs)
stages = []
for i in range(4): # num_stages
downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4
stages += [OsaStage(
in_ch_list[i],
stage_conv_chs[i],
stage_out_chs[i],
block_per_stage[i],
layer_per_block,
downsample=downsample,
drop_path_rates=stage_dpr[i],
**stage_args,
)]
self.num_features = stage_out_chs[i]
current_stride *= 2 if downsample else 1
self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.head_hidden_size = self.num_features
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
return self.stages(x)
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 &
# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
model_cfgs = dict(
vovnet39a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=False,
depthwise=False,
attn='',
),
vovnet57a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=False,
depthwise=False,
attn='',
),
ese_vovnet19b_slim_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_slim=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet19b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet57b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet99b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 3, 9, 3],
residual=True,
depthwise=False,
attn='ese',
),
eca_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='eca',
),
)
model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b']
def _create_vovnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
VovNet,
variant,
pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs,
}
default_cfgs = generate_default_cfgs({
'vovnet39a.untrained': _cfg(url=''),
'vovnet57a.untrained': _cfg(url=''),
'ese_vovnet19b_slim_dw.untrained': _cfg(url=''),
'ese_vovnet19b_dw.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet19b_slim.untrained': _cfg(url=''),
'ese_vovnet39b.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet57b.untrained': _cfg(url=''),
'ese_vovnet99b.untrained': _cfg(url=''),
'eca_vovnet39b.untrained': _cfg(url=''),
'ese_vovnet39b_evos.untrained': _cfg(url=''),
})
@register_model
def vovnet39a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs)
@register_model
def vovnet57a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs)
@register_model
def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs)
# Experimental Models
@register_model
def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet:
def norm_act_fn(num_features, **nkwargs):
return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs)
return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
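if __name__ == '__main__':
    # Editor's smoke-test sketch, not part of the original file; run as a module so the
    # relative imports resolve, e.g. `python -m timm.models.vovnet`.
    model = vovnet39a(pretrained=False).eval()
    with torch.no_grad():
        print(model(torch.randn(1, 3, 224, 224)).shape)  # expected: torch.Size([1, 1000])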
|
pytorch-image-models/timm/models/vovnet.py/0
|
{
"file_path": "pytorch-image-models/timm/models/vovnet.py",
"repo_id": "pytorch-image-models",
"token_count": 7935
}
| 216
|
import math
import torch
from torch.optim.optimizer import Optimizer
class Nadam(Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
Originally taken from: https://github.com/pytorch/pytorch/pull/1408
NOTE: Has potential issues but does work well on some problems.
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
schedule_decay=schedule_decay,
)
super(Nadam, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
t = state['step']
bias_correction2 = 1 - beta2 ** t
if group['weight_decay'] != 0:
grad = grad.add(p, alpha=group['weight_decay'])
momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))
return loss
|
pytorch-image-models/timm/optim/nadam.py/0
|
{
"file_path": "pytorch-image-models/timm/optim/nadam.py",
"repo_id": "pytorch-image-models",
"token_count": 1921
}
| 217
|
""" TanH Scheduler
TanH schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from typing import List
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class TanhLRScheduler(Scheduler):
"""
    Hyperbolic-Tangent decay with restarts.
This is described in the paper https://arxiv.org/abs/1806.01593
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lb: float = -7.,
ub: float = 3.,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
assert lb < ub
assert cycle_limit >= 0
assert warmup_t >= 0
assert warmup_lr_init >= 0
self.lb = lb
self.ub = ub
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t: int) -> List[float]:
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
if i < self.cycle_limit:
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
tr = t_curr / t_i
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
|
pytorch-image-models/timm/scheduler/tanh_lr.py/0
|
{
"file_path": "pytorch-image-models/timm/scheduler/tanh_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 1972
}
| 218
|
import random
import numpy as np
import torch
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
|
pytorch-image-models/timm/utils/random.py/0
|
{
"file_path": "pytorch-image-models/timm/utils/random.py",
"repo_id": "pytorch-image-models",
"token_count": 68
}
| 219
|
<div align="center">
<a href="https://www.youtube.com/watch?v=jlMAX2Oaht0">
<img width=560 height=315 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png">
</a>
# Text Generation Inference
<a href="https://github.com/huggingface/text-generation-inference">
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social">
</a>
<a href="https://huggingface.github.io/text-generation-inference">
<img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational">
</a>
A Rust, Python and gRPC server for text generation inference. Used in production at [Hugging Face](https://huggingface.co)
to power Hugging Chat, the Inference API and Inference Endpoint.
</div>
## Table of contents
- [Get Started](#get-started)
- [Docker](#docker)
- [API documentation](#api-documentation)
- [Using a private or gated model](#using-a-private-or-gated-model)
- [A note on Shared Memory (shm)](#a-note-on-shared-memory-shm)
- [Distributed Tracing](#distributed-tracing)
- [Architecture](#architecture)
- [Local install](#local-install)
- [Optimized architectures](#optimized-architectures)
- [Run locally](#run-locally)
- [Run](#run)
- [Quantization](#quantization)
- [Develop](#develop)
- [Testing](#testing)
Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and [more](https://huggingface.co/docs/text-generation-inference/supported_models). TGI implements many features, such as:
- Simple launcher to serve most popular LLMs
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- Continuous batching of incoming requests for increased total throughput
- [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) compatible with Open AI Chat Completion API
- Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with:
- [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
- [GPT-Q](https://arxiv.org/abs/2210.17323)
- [EETQ](https://github.com/NetEase-FuXi/EETQ)
- [AWQ](https://github.com/casper-hansen/AutoAWQ)
- [Marlin](https://github.com/IST-DASLab/marlin)
- [fp8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty, more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor))
- Stop sequences
- Log probabilities
- [Speculation](https://huggingface.co/docs/text-generation-inference/conceptual/speculation) ~2x latency
- [Guidance/JSON](https://huggingface.co/docs/text-generation-inference/conceptual/guidance). Specify output format to speed up inference and make sure the output is valid according to some specs.
- Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output
- Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance
### Hardware support
- [Nvidia](https://github.com/huggingface/text-generation-inference/pkgs/container/text-generation-inference)
- [AMD](https://github.com/huggingface/text-generation-inference/pkgs/container/text-generation-inference) (-rocm)
- [Inferentia](https://github.com/huggingface/optimum-neuron/tree/main/text-generation-inference)
- [Intel GPU](https://github.com/huggingface/text-generation-inference/pull/1475)
- [Gaudi](https://github.com/huggingface/tgi-gaudi)
- [Google TPU](https://huggingface.co/docs/optimum-tpu/howto/serving)
## Get Started
### Docker
For a detailed starting guide, please see the [Quick Tour](https://huggingface.co/docs/text-generation-inference/quicktour). The easiest way of getting started is using the official Docker container:
```shell
model=HuggingFaceH4/zephyr-7b-beta
# share a volume with the Docker container to avoid downloading weights every run
volume=$PWD/data
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
ghcr.io/huggingface/text-generation-inference:2.2.0 --model-id $model
```
And then you can make requests like
```bash
curl 127.0.0.1:8080/generate_stream \
-X POST \
-d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
-H 'Content-Type: application/json'
```
You can also use [TGI's Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) to obtain Open AI Chat Completion API compatible responses.
```bash
curl localhost:3000/v1/chat/completions \
-X POST \
-d '{
"model": "tgi",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is deep learning?"
}
],
"stream": true,
"max_tokens": 20
}' \
-H 'Content-Type: application/json'
```
**Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.
**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.2.0-rocm --model-id $model` instead of the command above.
To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
```
text-generation-launcher --help
```
### API documentation
You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route.
The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference).
### Using a private or gated model
You have the option to utilize the `HF_TOKEN` environment variable for configuring the token employed by
`text-generation-inference`. This allows you to gain access to protected resources.
For example, if you want to serve the gated Llama V2 model variants:
1. Go to https://huggingface.co/settings/tokens
2. Copy your cli READ token
3. Export `HF_TOKEN=<your cli READ token>`
or with Docker:
```shell
model=meta-llama/Meta-Llama-3.1-8B-Instruct
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=<your cli READ token>
docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.0 --model-id $model
```
### A note on Shared Memory (shm)
[`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by
`PyTorch` to do distributed training/inference. `text-generation-inference` makes
use of `NCCL` to enable Tensor Parallelism to dramatically speed up inference for large language models.
In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory if
peer-to-peer using NVLink or PCI is not possible.
To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` on the above command.
If you are running `text-generation-inference` inside `Kubernetes`, you can also add Shared Memory to the container by
creating a volume with:
```yaml
- name: shm
emptyDir:
medium: Memory
sizeLimit: 1Gi
```
and mounting it to `/dev/shm`.
Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable. However, note that
this will impact performance.
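For example, reusing the Docker command from the quick start above (the image tag, `$volume`, and `$model` are the same placeholders), the variable can be passed with `-e`:

```shell
docker run --gpus all -e NCCL_SHM_DISABLE=1 -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:2.2.0 --model-id $model
```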
### Distributed Tracing
`text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature
by setting the address to an OTLP collector with the `--otlp-endpoint` argument. The default service name can be
overridden with the `--otlp-service-name` argument.
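For example, assuming an OTLP collector listening on the default gRPC port on the same machine (both the endpoint address and service name below are placeholders to adapt to your setup):

```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2 \
    --otlp-endpoint http://localhost:4317 \
    --otlp-service-name my-tgi-service
```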
### Architecture

### Local install
You can also opt to install `text-generation-inference` locally.
First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least
Python 3.9, e.g. using `conda`:
```shell
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
conda create -n text-generation-inference python=3.11
conda activate text-generation-inference
```
You may also need to install Protoc.
On Linux:
```shell
PROTOC_ZIP=protoc-21.12-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
```
On MacOS, using Homebrew:
```shell
brew install protobuf
```
Then run:
```shell
BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2
```
**Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run:
```shell
sudo apt-get install libssl-dev gcc -y
```
## Optimized architectures
TGI works out of the box to serve optimized models for all modern architectures. They can be found in [this list](https://huggingface.co/docs/text-generation-inference/supported_models).
Other architectures are supported on a best-effort basis using:
`AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")`
or
`AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")`
## Run locally
### Run
```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2
```
### Quantization
You can also run pre-quantized weights (AWQ, GPTQ, Marlin) or quantize weights on the fly with bitsandbytes, EETQ, or fp8 to reduce the VRAM requirement:
```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2 --quantize
```
4bit quantization is available using the [NF4 and FP4 data types from bitsandbytes](https://arxiv.org/pdf/2305.14314.pdf). It can be enabled by providing `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` as a command line argument to `text-generation-launcher`.
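For example, to serve the same model as above with on-the-fly NF4 quantization:

```shell
text-generation-launcher --model-id mistralai/Mistral-7B-Instruct-v0.2 --quantize bitsandbytes-nf4
```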
Read more about quantization in the [Quantization documentation](https://huggingface.co/docs/text-generation-inference/en/conceptual/quantization).
## Develop
```shell
make server-dev
make router-dev
```
## Testing
```shell
# python
make python-server-tests
make python-client-tests
# or both server and client tests
make python-tests
# rust cargo tests
make rust-tests
# integration tests
make integration-tests
```
|
text-generation-inference/README.md/0
|
{
"file_path": "text-generation-inference/README.md",
"repo_id": "text-generation-inference",
"token_count": 3849
}
| 220
|
use thiserror::Error;
use text_generation_router::server;
#[derive(Debug, Error)]
pub enum TensorRtLlmBackendError {
#[error("Tokenizer error: {0}")]
Tokenizer(String),
#[error("Argument validation error: {0}")]
ArgumentValidation(String),
#[error("WebServer error: {0}")]
WebServer(#[from] server::WebServerError),
#[error("Tokio runtime failed to start: {0}")]
Tokio(#[from] std::io::Error),
}
|
text-generation-inference/backends/trtllm/src/errors.rs/0
|
{
"file_path": "text-generation-inference/backends/trtllm/src/errors.rs",
"repo_id": "text-generation-inference",
"token_count": 171
}
| 221
|
use crate::block_allocator::{Allocator, BlockAllocation};
use slotmap::{DefaultKey, SlotMap};
use std::{
collections::{BTreeSet, HashMap},
sync::Arc,
};
pub struct RadixAllocator {
allocation_id: u64,
allocations: HashMap<u64, RadixAllocation>,
cache_blocks: RadixTrie,
/// Blocks that are immediately available for allocation.
free_blocks: Vec<u32>,
#[allow(dead_code)]
    // This isn't used because the prefix needs to match without the windowing
    // mechanism. This at worst is overallocating, not necessarily being wrong.
window_size: Option<u32>,
block_size: u32,
}
impl RadixAllocator {
pub fn new(block_size: u32, n_blocks: u32, window_size: Option<u32>) -> Self {
RadixAllocator {
allocation_id: 0,
allocations: HashMap::new(),
cache_blocks: RadixTrie::new(block_size as usize),
// Block 0 is reserved for health checks.
free_blocks: (1..n_blocks).collect(),
window_size,
block_size,
}
}
fn alloc_or_reclaim(&mut self, n_blocks_needed: usize) -> Option<Vec<u32>> {
if self.free_blocks.len() < n_blocks_needed {
// This is a bit annoying, we first extend the free list and then
// split it off again below. This is because we need to put it on
// the free list if we cannot allocate enough blocks. This is only
// temporary, the trie needs to be able to report whether it can
// allocate the requested amount. Just not implemented yet.
self.free_blocks.extend(
self.cache_blocks
.evict(n_blocks_needed - self.free_blocks.len()),
);
}
if self.free_blocks.len() >= n_blocks_needed {
Some(
self.free_blocks
.split_off(self.free_blocks.len() - n_blocks_needed),
)
} else {
None
}
}
}
// Allocator trait
impl Allocator for RadixAllocator {
fn allocate(
&mut self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
let mut blocks = vec![];
let prefix_node = if let Some(prefill_tokens) = prefill_tokens.as_ref() {
let node_id = self
.cache_blocks
.find(prefill_tokens.as_slice(), &mut blocks);
            // Even if this allocation fails below, we need to increase the
// refcount to ensure that the prefix that was found is not evicted.
node_id
} else {
self.cache_blocks.root_id()
};
self.cache_blocks
.incref(prefix_node)
.expect("Failed to increment refcount");
let prefix_len = blocks.len() * self.block_size as usize;
let suffix_len = tokens - prefix_len as u32;
let suffix_blocks = (suffix_len + self.block_size - 1) / self.block_size;
match self.alloc_or_reclaim(suffix_blocks as usize) {
Some(suffix_blocks) => blocks.extend(suffix_blocks),
None => {
self.cache_blocks
.decref(prefix_node)
.expect("Failed to decrement refcount");
return None;
}
}
// 1:1 mapping of blocks and slots.
let slots = if self.block_size == 1 {
blocks.clone()
} else {
let mut slots = Vec::with_capacity(blocks.len() * self.block_size as usize);
'slots: for block_id in &blocks {
for s in (block_id * self.block_size)..((block_id + 1) * self.block_size) {
slots.push(s);
if slots.len() as u32 == tokens {
break 'slots;
}
}
}
slots
};
let allocation = RadixAllocation {
prefix_node,
cached_prefix_len: prefix_len,
prefill_tokens: prefill_tokens.clone(),
};
tracing::debug!("Blocks {blocks:?}");
self.allocation_id += 1;
self.allocations.insert(self.allocation_id, allocation);
Some(BlockAllocation {
allocation_id: self.allocation_id,
block_allocator: None,
blocks,
slots,
prefix_len: prefix_len as u32,
})
}
fn free(&mut self, blocks: Vec<u32>, allocation_id: u64) {
let allocation = match self.allocations.remove(&allocation_id) {
Some(allocation) => allocation,
None => unreachable!("Tried to free an unknown allocation."),
};
self.cache_blocks
.decref(allocation.prefix_node)
.expect("Failed to decrement refcount");
if let Some(prefill_tokens) = allocation.prefill_tokens {
let prefill_tokens = prefill_tokens.as_slice();
// If there are prefill tokens that did not come from the cache,
// add them to the cache.
if prefill_tokens.len() > allocation.cached_prefix_len {
let aligned =
(prefill_tokens.len() / self.block_size as usize) * self.block_size as usize;
if aligned > 0 {
let prefix_len = self
.cache_blocks
.insert(
&prefill_tokens[..aligned],
&blocks[..aligned / self.block_size as usize],
)
// Unwrap, failing is a programming error.
.expect("Failed to store prefill tokens");
// We can have a prefill with the following structure:
//
// |---| From the prefix cache.
// A B C D E F G
//|--------| Found in the trie during insertion.
//
// This means that while processing this request there was a
// partially overlapping request that had A..=E in its
// prefill. In this case we need to free the blocks D E.
if prefix_len > allocation.cached_prefix_len {
self.free_blocks.extend(
&blocks[allocation.cached_prefix_len / self.block_size as usize
..prefix_len / self.block_size as usize],
);
}
}
}
// Free non-prefill blocks.
self.free_blocks
.extend(&blocks[prefill_tokens.len() / self.block_size as usize..]);
} else {
self.free_blocks.extend(blocks);
}
}
}
struct RadixAllocation {
prefix_node: NodeId,
cached_prefix_len: usize,
prefill_tokens: Option<Arc<Vec<u32>>>,
}
// Radix trie that is heavily inspired by radix attention from sglang.
//
// The trie is optimized for prefix caching:
//
// - A normal radix trie stores discrete values. In this radix trie,
// inserting *abc* with value *xyz* will also enable lookup for
// *a* (*x*) and *ab* (*xy*).
// - As a result, every value is required to have the same length as
// the key.
// - We store additional information in each node, such as last access
// time and a reference count.
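//
// As a small illustrative example (mirroring the unit tests at the bottom of
// this file, with block_size = 1):
//
//   let mut trie = RadixTrie::new(1);
//   trie.insert(&[0, 1, 2], &[0, 1, 2])?;   // returns 0: nothing was cached before
//   let mut blocks = Vec::new();
//   trie.find(&[0, 1], &mut blocks);        // blocks == [0, 1], a shared prefix hit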
#[derive(Debug)]
pub enum TrieError {
InvalidNodeId,
RefCountUnderflow,
BlockTokenCountMismatch,
}
pub type NodeId = DefaultKey;
#[derive(Debug)]
pub struct RadixTrie {
    /// Identifier of the root node.
root: DefaultKey,
    /// Leaf node identifiers ordered by increasing recency.
leaves: BTreeSet<(u64, NodeId)>,
/// All trie nodes.
nodes: SlotMap<NodeId, TrieNode>,
    /// Time as a monotonically increasing counter to avoid the system
/// call that a real time lookup would require.
time: u64,
/// All blocks need to be aligned with this
block_size: usize,
}
impl RadixTrie {
/// Construct a new radix trie.
pub fn new(block_size: usize) -> Self {
let root = TrieNode::new(vec![], vec![], 0, None);
let mut nodes = SlotMap::new();
let root = nodes.insert(root);
RadixTrie {
leaves: BTreeSet::new(),
nodes,
root,
time: 0,
block_size,
}
}
/// Find the prefix of the given tokens.
///
/// The blocks corresponding to the part of the prefix that could be found
/// are written to `blocks`. The number of blocks is in `0..=tokens.len()`.
/// Returns the identifier of the trie node that contains the longest
/// prefix. The node identifier can be used by callers to e.g. increase its
/// reference count.
///
/// Using this method will update the access time of the traversed nodes.
pub fn find(&mut self, key: &[u32], blocks: &mut Vec<u32>) -> NodeId {
self.time += 1;
self.find_(self.root, key, blocks)
}
/// Find worker.
fn find_(&mut self, mut node_id: NodeId, key: &[u32], blocks: &mut Vec<u32>) -> NodeId {
let node = &self.nodes[node_id];
if let Some(&child_id) = node.children.get(&key[0]) {
self.update_access_time(child_id);
let child = self.nodes.get(child_id).expect("Invalid child identifier");
let shared_prefix_len = shared_prefix(&child.key, key, self.block_size);
assert_eq!(shared_prefix_len % self.block_size, 0);
blocks.extend(&child.blocks[..shared_prefix_len / self.block_size]);
let key = &key[shared_prefix_len..];
if !key.is_empty() {
node_id = self.find_(child_id, key, blocks);
}
}
node_id
}
/// Decrease the reference count of a node.
pub fn decref(&mut self, node_id: NodeId) -> Result<(), TrieError> {
// We don't care about refcounting for root, since it will never
// be evicted.
if node_id == self.root {
return Ok(());
}
let node = self
.nodes
.get_mut(node_id)
.ok_or(TrieError::InvalidNodeId)?;
if node.ref_count == 0 {
return Err(TrieError::RefCountUnderflow);
}
node.ref_count -= 1;
if node.ref_count == 0 {
self.leaves.insert((node.last_accessed, node_id));
}
Ok(())
}
/// Increase the reference count of a node.
pub fn incref(&mut self, node_id: NodeId) -> Result<(), TrieError> {
if node_id == self.root {
return Ok(());
}
let node = self
.nodes
.get_mut(node_id)
.ok_or(TrieError::InvalidNodeId)?;
if node.ref_count == 0 {
self.leaves.remove(&(node.last_accessed, node_id));
}
node.ref_count += 1;
Ok(())
}
/// Evict `n_blocks` from the trie.
///
/// Returns the evicted blocks. When the length is less than `n_blocks`,
    /// not enough blocks could be evicted.
pub fn evict(&mut self, n_blocks: usize) -> Vec<u32> {
// NOTE: we don't return Result here. If any of the unwrapping fails,
// it's a programming error in the trie implementation, not a user
// error caused by e.g. an invalid argument.
// TODO: add some bookkeeping in the future to check whether we can
// evict n_blocks and return `None` if we can't. We are now needlessly
// evicting prefixes from the cache in such a case.
let mut evicted = Vec::new();
while let Some((last_access, node_id)) = self.leaves.pop_first() {
let blocks_needed = n_blocks - evicted.len();
let node = self.nodes.get(node_id).expect("Leave does not exist");
if blocks_needed >= node.blocks.len() {
// We need to evict the whole node if we need more blocks than it has.
let node = self.remove_node(node_id);
evicted.extend(node.blocks);
if evicted.len() >= n_blocks {
break;
}
} else {
// The node has more blocks than needed, so we'll just remove
// the required number of blocks and leave the remaining blocks
// untouched.
let node = self.nodes.get_mut(node_id).expect("Leave does not exist");
node.key.truncate(node.blocks.len() - blocks_needed);
evicted.extend(node.blocks.split_off(node.blocks.len() - blocks_needed));
self.leaves.insert((last_access, node_id));
break;
}
}
evicted
}
/// Insert a prefill along with its blocks.
///
/// This method returns the length of the prefix that was already
/// in the trie. E.g. if the length is 10, this means that for
/// the first 10 elements of the tree **the blocks are not updated**.
pub fn insert(&mut self, tokens: &[u32], blocks: &[u32]) -> Result<usize, TrieError> {
self.time += 1;
let common = self.insert_(self.root, tokens, blocks)?;
Ok(common)
}
/// Insertion worker.
fn insert_(
&mut self,
node_id: NodeId,
tokens: &[u32],
blocks: &[u32],
) -> Result<usize, TrieError> {
// TODO: in the future we may want to check that the blocks match for
// the part of the prefix that is already in the trie to detect
// mismatches.
if tokens.len() != blocks.len() * self.block_size {
return Err(TrieError::BlockTokenCountMismatch);
}
if let Some(&child_id) = self.nodes[node_id].children.get(&tokens[0]) {
self.update_access_time(child_id);
let child = self
.nodes
.get_mut(child_id)
// Unwrap here, since failure is a bug.
.expect("Child node does not exist");
let shared_prefix_len = shared_prefix(&child.key, tokens, self.block_size);
// We are done, the prefix is already in the trie.
if shared_prefix_len == tokens.len() || shared_prefix_len == 0 {
return Ok(shared_prefix_len);
}
// The node's prefix is a prefix of the insertion prefix.
if shared_prefix_len == child.key.len() {
return Ok(shared_prefix_len
+ self.insert_(
child_id,
&tokens[shared_prefix_len..],
&blocks[shared_prefix_len / self.block_size..],
)?);
}
// The node's prefix and the insertion prefix only match partially,
// split the node to just contain the matching part. Then insert the
// remainder of the prefix into the node again
let child_id = self.split_node(child_id, shared_prefix_len);
let key = &tokens[shared_prefix_len..];
let blocks = &blocks[shared_prefix_len / self.block_size..];
Ok(shared_prefix_len + self.insert_(child_id, key, blocks)?)
} else {
self.add_node(node_id, tokens, blocks);
Ok(0)
}
}
fn split_node(&mut self, node_id: NodeId, prefix_len: usize) -> NodeId {
// We have to make the current node a child to ensure that its
// properties and node id stay the same.
        // This function unwraps; an invalid node_id is a programming error.
let node = self
.nodes
.get_mut(node_id)
.expect("Node to-be split does not exist");
let mut parent_key = node.key.split_off(prefix_len);
let mut parent_blocks = node.blocks.split_off(prefix_len);
// Move first part of the prefix to the parent. We swap to avoid
// an allocation + copy for both splits of the key/blocks.
std::mem::swap(&mut node.key, &mut parent_key);
std::mem::swap(&mut node.blocks, &mut parent_blocks);
let node_key = node.key[0];
let grandparent_id = node.parent.expect("Node does not have a parent");
let parent_id = self.add_node(grandparent_id, parent_key, parent_blocks);
self.add_node_to_parent(parent_id, node_key, node_id);
// Reborrow to make the borrow checker happy.
let node = self
.nodes
.get_mut(node_id)
.expect("Node to-be split does not exist");
node.parent = Some(parent_id);
parent_id
}
/// Create a node and add it to the parent.
fn add_node(
&mut self,
parent_id: NodeId,
key: impl Into<Vec<u32>>,
blocks: impl Into<Vec<u32>>,
) -> NodeId {
let key = key.into();
let blocks = blocks.into();
let first = key[0];
let child = TrieNode::new(key, blocks, self.time, Some(parent_id));
let child_id = self.nodes.insert(child);
self.add_node_to_parent(parent_id, first, child_id);
self.leaves.insert((self.time, child_id));
child_id
}
/// Add a node to the parent.
fn add_node_to_parent(&mut self, parent_id: NodeId, first: u32, child_id: NodeId) {
// Unwrap here, passing in an unknown id is a programming error.
let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node");
if parent.children.insert(first, child_id).is_none() {
// Only increase reference count if child does not replace another child.
self.incref(parent_id)
.expect("Failed to increase parent refcount");
}
}
/// Remove a node from the trie.
fn remove_node(&mut self, node_id: NodeId) -> TrieNode {
// Unwrap here, passing in an unknown id is a programming error.
let node = self.nodes.remove(node_id).expect("Unknown node");
let parent_id = node.parent.expect("Attempted to remove root node");
let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node");
parent.children.remove(&node.key[0]);
self.decref(parent_id)
.expect("Failed to decrease parent refcount");
self.nodes.remove(node_id);
node
}
fn update_access_time(&mut self, node_id: NodeId) {
// Unwrap here, passing in an unknown id is a programming error.
let node = self.nodes.get_mut(node_id).expect("Unknown node");
        // Update the ordered leaves set if the node is a leaf.
if self.leaves.remove(&(node.last_accessed, node_id)) {
self.leaves.insert((self.time, node_id));
}
node.last_accessed = self.time;
}
#[allow(dead_code)]
#[doc(hidden)]
/// Print debugging output for the trie.
///
    /// In contrast to `Debug`, nicely formatted.
pub fn print_debug(&self) {
self.print_debug_(self.root, 0);
}
fn print_debug_(&self, node_id: NodeId, indent: usize) {
let node = &self.nodes[node_id];
eprintln!(
"{}{:?}, key: {:?}, blocks: {:?}, ref_count: {}, last_accessed: {}, parent: {:?}, children: {:?}",
" ".repeat(indent),
node_id,
node.key,
node.blocks,
node.ref_count,
node.last_accessed,
node.parent,
node.children
);
for child_id in self.nodes[node_id].children.values() {
self.print_debug_(*child_id, indent + 2);
}
}
pub(crate) fn root_id(&self) -> DefaultKey {
self.root
}
}
/// Trie node.
#[derive(Debug)]
struct TrieNode {
blocks: Vec<u32>,
children: HashMap<u32, NodeId>,
key: Vec<u32>,
last_accessed: u64,
parent: Option<NodeId>,
ref_count: usize,
}
impl TrieNode {
fn new(key: Vec<u32>, blocks: Vec<u32>, last_accessed: u64, parent: Option<NodeId>) -> Self {
TrieNode {
children: HashMap::new(),
key,
blocks,
last_accessed,
parent,
ref_count: 0,
}
}
}
fn shared_prefix(left: &[u32], right: &[u32], block_size: usize) -> usize {
let full = left.iter().zip(right).take_while(|(a, b)| a == b).count();
(full / block_size) * block_size
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
#[test]
fn allocator_block_size() {
let mut cache = RadixAllocator::new(2, 12, None);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]);
assert_eq!(allocation.prefix_len, 4);
}
#[test]
fn allocator_block_size_non_aligned() {
let mut cache = RadixAllocator::new(2, 12, None);
let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap();
assert_eq!(allocation.blocks, vec![8, 9, 10, 11]);
assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]);
assert_eq!(allocation.prefix_len, 2);
}
#[test]
fn allocator_reuses_prefixes() {
let mut cache = RadixAllocator::new(1, 12, None);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation.blocks, allocation.slots);
assert_eq!(allocation.prefix_len, 0);
cache.free(allocation.blocks.clone(), allocation.allocation_id);
let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation.prefix_len, 4);
}
#[test]
fn allocator_collects_older_prefixes_first() {
let mut cache = RadixAllocator::new(1, 7, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation1.blocks, vec![3, 4, 5, 6]);
assert_eq!(allocation1.prefix_len, 0);
let allocation2 = cache.allocate(2, Some(Arc::new(vec![4, 5]))).unwrap();
assert_eq!(allocation2.blocks, vec![1, 2]);
assert_eq!(allocation2.prefix_len, 0);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
// We should get the blocks of the first allocation, since they are more recent.
let allocation3 = cache.allocate(4, Some(Arc::new(vec![6, 7, 8, 9]))).unwrap();
assert_eq!(allocation3.blocks, vec![3, 4, 5, 6]);
assert_eq!(allocation3.prefix_len, 0);
}
#[test]
fn allocator_frees_fully_overlapping_prefills() {
let mut cache = RadixAllocator::new(1, 10, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
let allocation2 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
let allocation3 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap();
assert_eq!(allocation3.prefix_len, 4);
// 10 blocks, of which 1 reserved for health checks, 4 for the cached blocks.
assert_eq!(cache.free_blocks.len(), 5);
}
#[test]
fn allocator_frees_partially_overlapping_prefills() {
let mut cache = RadixAllocator::new(1, 20, None);
let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1]))).unwrap();
assert_eq!(allocation1.blocks, vec![16, 17, 18, 19]);
assert_eq!(allocation1.prefix_len, 0);
cache.free(allocation1.blocks.clone(), allocation1.allocation_id);
let allocation2 = cache
.allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 4, 5])))
.unwrap();
assert_eq!(allocation2.blocks, vec![16, 17, 12, 13, 14, 15, 18, 19]);
assert_eq!(allocation2.prefix_len, 2);
let allocation3 = cache
.allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 6, 7])))
.unwrap();
assert_eq!(allocation3.blocks, vec![16, 17, 6, 7, 8, 9, 10, 11]);
assert_eq!(allocation3.prefix_len, 2);
cache.free(allocation3.blocks.clone(), allocation3.allocation_id);
cache.free(allocation2.blocks.clone(), allocation2.allocation_id);
// 20 blocks, of which 1 reserved for health checks, 6 for allocation3, 2 for allocation2.
assert_eq!(cache.free_blocks.len(), 11);
let allocation4 = cache
.allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 4, 5])))
.unwrap();
assert_eq!(allocation4.blocks, vec![16, 17, 6, 7, 14, 15]);
assert_eq!(allocation4.prefix_len, 6);
assert_eq!(cache.free_blocks.len(), 11);
let allocation5 = cache
.allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 6, 7])))
.unwrap();
assert_eq!(allocation5.blocks, vec![16, 17, 6, 7, 8, 9]);
assert_eq!(allocation5.prefix_len, 6);
assert_eq!(cache.free_blocks.len(), 11);
}
#[test]
fn trie_insertions_have_correct_prefix_len() {
let mut trie = RadixTrie::new(1);
assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 0);
// Already exists.
assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 3);
// Completely new at root-level
assert_eq!(trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap(), 0);
// Contains full prefix, but longer.
assert_eq!(trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap(), 3);
// Shares partial prefix, we need a split.
assert_eq!(
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap(),
4
);
}
#[test]
fn trie_insertions_block_size() {
let mut trie = RadixTrie::new(2);
assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 0);
// Already exists.
// But needs to be block_size aligned
assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 4);
// Completely new at root-level
assert_eq!(trie.insert(&[1, 2, 3, 4], &[1, 2]).unwrap(), 0);
// Contains full prefix, but longer.
assert_eq!(trie.insert(&[0, 1, 2, 3, 4, 5], &[0, 1, 2]).unwrap(), 4);
// Shares partial prefix, we need a split.
assert_eq!(
trie.insert(&[0, 1, 3, 4, 5, 6, 7, 8], &[0, 1, 2, 3])
.unwrap(),
2
);
}
#[test]
fn trie_get_returns_correct_blocks() {
let mut trie = RadixTrie::new(1);
trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap();
trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap();
trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap();
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap();
let mut blocks = Vec::new();
trie.find(&[0], &mut blocks);
assert_eq!(blocks, vec![0]);
blocks.clear();
trie.find(&[0, 1, 2], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2]);
blocks.clear();
trie.find(&[1, 2, 3], &mut blocks);
assert_eq!(blocks, vec![1, 2, 3]);
blocks.clear();
trie.find(&[0, 1, 2, 3], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 4]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 5], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 5]);
}
#[test]
fn trie_evict_removes_correct_blocks() {
let mut trie = RadixTrie::new(1);
trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap();
trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7])
.unwrap();
trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap();
trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap();
let mut blocks = Vec::new();
        // Remove less than the leaf blocks.
assert_eq!(trie.evict(1), vec![7]);
trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3, 5, 6]);
// Refresh other leaf.
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
trie.find(&[1, 2, 3], &mut blocks);
        // Remove the leaf blocks exactly.
assert_eq!(trie.evict(2), vec![5, 6]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks);
assert_eq!(blocks, vec![0, 1, 2, 3]);
trie.find(&[1, 2, 3], &mut blocks);
        // Remove more than the leaf blocks.
assert_eq!(trie.evict(3), vec![4, 3, 2]);
blocks.clear();
trie.find(&[0, 1, 2, 3, 4], &mut blocks);
assert_eq!(blocks, vec![0, 1]);
// Clear out the whole trie.
assert_eq!(trie.evict(10), vec![1, 2, 3, 0, 1]);
}
}
|
text-generation-inference/backends/v3/src/radix.rs/0
|
{
"file_path": "text-generation-inference/backends/v3/src/radix.rs",
"repo_id": "text-generation-inference",
"token_count": 14068
}
| 222
|
import pytest
from text_generation import Client, AsyncClient
from text_generation.errors import NotFoundError, ValidationError
from text_generation.types import FinishReason, InputToken
def test_generate(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
response = client.generate("test", max_new_tokens=1, decoder_input_details=True)
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
assert len(response.details.prefill) == 2
assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None)
assert len(response.details.tokens) == 1
assert response.details.tokens[0].id == 29918
assert response.details.tokens[0].text == "_"
assert not response.details.tokens[0].special
def test_generate_best_of(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
response = client.generate(
"test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
)
assert response.details.seed is not None
assert response.details.best_of_sequences is not None
assert len(response.details.best_of_sequences) == 1
assert response.details.best_of_sequences[0].seed is not None
def test_generate_not_found(fake_url, hf_headers):
client = Client(fake_url, hf_headers)
with pytest.raises(NotFoundError):
client.generate("test")
def test_generate_validation_error(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
client.generate("test", max_new_tokens=10_000)
def test_generate_stream(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
responses = [
response for response in client.generate_stream("test", max_new_tokens=1)
]
assert len(responses) == 1
response = responses[0]
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
def test_generate_stream_not_found(fake_url, hf_headers):
client = Client(fake_url, hf_headers)
with pytest.raises(NotFoundError):
list(client.generate_stream("test"))
def test_generate_stream_validation_error(llama_7b_url, hf_headers):
client = Client(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
list(client.generate_stream("test", max_new_tokens=10_000))
@pytest.mark.asyncio
async def test_generate_async(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
response = await client.generate(
"test", max_new_tokens=1, decoder_input_details=True
)
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
assert len(response.details.prefill) == 2
assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None)
assert response.details.prefill[1] == InputToken(
id=1243, text="test", logprob=-10.96875
)
assert len(response.details.tokens) == 1
assert response.details.tokens[0].id == 29918
assert response.details.tokens[0].text == "_"
assert not response.details.tokens[0].special
@pytest.mark.asyncio
async def test_generate_async_best_of(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
response = await client.generate(
"test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True
)
assert response.details.seed is not None
assert response.details.best_of_sequences is not None
assert len(response.details.best_of_sequences) == 1
assert response.details.best_of_sequences[0].seed is not None
@pytest.mark.asyncio
async def test_generate_async_not_found(fake_url, hf_headers):
client = AsyncClient(fake_url, hf_headers)
with pytest.raises(NotFoundError):
await client.generate("test")
@pytest.mark.asyncio
async def test_generate_async_validation_error(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
await client.generate("test", max_new_tokens=10_000)
@pytest.mark.asyncio
async def test_generate_stream_async(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
responses = [
response async for response in client.generate_stream("test", max_new_tokens=1)
]
assert len(responses) == 1
response = responses[0]
assert response.generated_text == "_"
assert response.details.finish_reason == FinishReason.Length
assert response.details.generated_tokens == 1
assert response.details.seed is None
@pytest.mark.asyncio
async def test_generate_stream_async_not_found(fake_url, hf_headers):
client = AsyncClient(fake_url, hf_headers)
with pytest.raises(NotFoundError):
async for _ in client.generate_stream("test"):
pass
@pytest.mark.asyncio
async def test_generate_stream_async_validation_error(llama_7b_url, hf_headers):
client = AsyncClient(llama_7b_url, hf_headers)
with pytest.raises(ValidationError):
async for _ in client.generate_stream("test", max_new_tokens=10_000):
pass
|
text-generation-inference/clients/python/tests/test_client.py/0
|
{
"file_path": "text-generation-inference/clients/python/tests/test_client.py",
"repo_id": "text-generation-inference",
"token_count": 2110
}
| 223
|
# Monitoring TGI server with Prometheus and Grafana dashboard
A TGI server deployment can easily be monitored through a Grafana dashboard consuming a Prometheus data collection. Examples of inspectable metrics are statistics on the effective batch sizes used by TGI, prefill/decode latencies, the number of generated tokens, etc.
In this tutorial, we look at how to set up a local Grafana dashboard to monitor TGI usage.

## Setup on the server machine
First, on your server machine, TGI needs to be launched as usual. TGI exposes [multiple](https://github.com/huggingface/text-generation-inference/discussions/1127#discussioncomment-7240527) metrics that can be collected by a Prometheus monitoring server.
In the rest of this tutorial, we assume that TGI was launched through Docker with `--network host`.
On the server where TGI is hosted, a Prometheus server needs to be installed and launched. To do so, please follow [Prometheus installation instructions](https://prometheus.io/download/#prometheus). For example, at the time of writing on a Linux machine:
```
wget https://github.com/prometheus/prometheus/releases/download/v2.52.0/prometheus-2.52.0.linux-amd64.tar.gz
tar -xvzf prometheus-2.52.0.linux-amd64.tar.gz
cd prometheus
```
Prometheus needs to be configured to scrape TGI's port. To do so, in the Prometheus configuration file `prometheus.yml`, one needs to edit the lines:
```
static_configs:
- targets: ["0.0.0.0:80"]
```
to use the correct IP address and port.
We suggest trying `curl 0.0.0.0:80/generate -X POST -d '{"inputs":"hey chatbot, how are","parameters":{"max_new_tokens":15}}' -H 'Content-Type: application/json'` on the server side to make sure the correct IP and port are configured.
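For reference, a minimal `prometheus.yml` scrape configuration could look like the following (assuming TGI is reachable on port 80 of the same machine; adapt the target to your deployment):
```
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: "tgi"
    static_configs:
      - targets: ["0.0.0.0:80"]
```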
Once Prometheus is configured, Prometheus server can be launched on the same machine where TGI is launched:
```
./prometheus --config.file="prometheus.yml"
```
In this guide, Prometheus monitoring data will be consumed on a local computer. Hence, we need to forward the Prometheus port (9090 by default) to the local computer. To do so, we can for example:
* Use ssh [local port forwarding](https://www.ssh.com/academy/ssh/tunneling-example)
* Use ngrok port tunneling
For simplicity, we will use [Ngrok](https://ngrok.com/docs/) in this guide to tunnel the Prometheus port from the TGI server to the outside world.
For that, you should follow the steps at https://dashboard.ngrok.com/get-started/setup/linux, and once Ngrok is installed, use:
```bash
ngrok http http://0.0.0.0:9090
```
As a sanity check, one can make sure that the Prometheus server can be accessed at the URL given by Ngrok (in the style of https://d661-4-223-164-145.ngrok-free.app) from a local machine.
## Setup on the monitoring machine
Monitoring is typically done on a machine other than the server itself. We use a Grafana dashboard to monitor TGI's server usage.
Two options are available:
* Use Grafana Cloud for a hosted dashboard solution (https://grafana.com/products/cloud/).
* Self-host a Grafana dashboard.
In this tutorial, for simplicity, we will self-host the dashboard. We recommend installing the Grafana open-source edition following [the official install instructions](https://grafana.com/grafana/download?platform=linux&edition=oss), using the available Linux binaries. For example:
```bash
wget https://dl.grafana.com/oss/release/grafana-11.0.0.linux-amd64.tar.gz
tar -zxvf grafana-11.0.0.linux-amd64.tar.gz
cd grafana-11.0.0
./bin/grafana-server
```
Once the Grafana server is launched, the Grafana interface is available at http://localhost:3000. One needs to log in with the `admin` username and `admin` password.
Once logged in, the Prometheus data source for Grafana needs to be configured, in the option `Add your first data source`. There, a Prometheus data source needs to be added with the Ngrok address we got earlier, which exposes the Prometheus port (example: https://d661-4-223-164-145.ngrok-free.app).
Once Prometheus data source is configured, we can finally create our dashboard! From home, go to `Create your first dashboard` and then `Import dashboard`. There, we will use the recommended dashboard template [tgi_grafana.json](https://github.com/huggingface/text-generation-inference/blob/main/assets/tgi_grafana.json) for a dashboard ready to be used, but you may configure your own dashboard as you like.
Community contributed dashboard templates are also available, for example [here](https://grafana.com/grafana/dashboards/19831-text-generation-inference-dashboard/) or [here](https://grafana.com/grafana/dashboards/20246-text-generation-inference/).
Load your dashboard configuration, and your TGI dashboard should be ready to go!
|
text-generation-inference/docs/source/basic_tutorials/monitoring.md/0
|
{
"file_path": "text-generation-inference/docs/source/basic_tutorials/monitoring.md",
"repo_id": "text-generation-inference",
"token_count": 1376
}
| 224
|
# Tensor Parallelism
Tensor parallelism is a technique used to fit a large model in multiple GPUs. For example, when multiplying the input tensors with the first weight tensor, the matrix multiplication is equivalent to splitting the weight tensor column-wise, multiplying each column with the input separately, and then concatenating the separate outputs. These outputs are then transferred from the GPUs and concatenated together to get the final result, like below 👇

<Tip warning={true}>
Tensor Parallelism only works for [officially supported models](../supported_models); it will not work when falling back to `transformers`. You can get more information about unsupported models [here](../basic_tutorials/non_core_models).
</Tip>
You can learn a lot more details about tensor-parallelism from [the `transformers` docs](https://huggingface.co/docs/transformers/main/en/perf_train_gpu_many#tensor-parallelism).
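The idea can be sketched in a few lines of NumPy (purely illustrative, not part of TGI): splitting the weight matrix column-wise, multiplying each shard with the same input, and concatenating the partial outputs reproduces the full matrix multiplication.

```python
import numpy as np

x = np.random.randn(4, 8)   # input activations
w = np.random.randn(8, 6)   # full weight matrix

# Shard the weights column-wise across two "GPUs".
w0, w1 = np.split(w, 2, axis=1)

# Each device multiplies the same input with its shard...
y0, y1 = x @ w0, x @ w1

# ...and concatenating the partial outputs recovers the full result.
assert np.allclose(np.concatenate([y0, y1], axis=1), x @ w)
```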
|
text-generation-inference/docs/source/conceptual/tensor_parallelism.md/0
|
{
"file_path": "text-generation-inference/docs/source/conceptual/tensor_parallelism.md",
"repo_id": "text-generation-inference",
"token_count": 272
}
| 225
|
import asyncio
import contextlib
import json
import math
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
from typing import Dict, List, Optional
import docker
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from docker.errors import NotFound
from syrupy.extensions.json import JSONSnapshotExtension
from text_generation import AsyncClient
from text_generation.types import (
BestOfSequence,
ChatComplete,
ChatCompletionChunk,
ChatCompletionComplete,
Completion,
Details,
Grammar,
InputToken,
Response,
Token,
)
DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HF_TOKEN = os.getenv("HF_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
DOCKER_DEVICES = os.getenv("DOCKER_DEVICES")
def pytest_addoption(parser):
parser.addoption(
"--release", action="store_true", default=False, help="run release tests"
)
def pytest_configure(config):
config.addinivalue_line("markers", "release: mark test as a release-only test")
def pytest_collection_modifyitems(config, items):
if config.getoption("--release"):
# --release given in cli: do not skip release tests
return
skip_release = pytest.mark.skip(reason="need --release option to run")
for item in items:
if "release" in item.keywords:
item.add_marker(skip_release)
class ResponseComparator(JSONSnapshotExtension):
rtol = 0.2
ignore_logprob = False
def serialize(
self,
data,
*,
exclude=None,
matcher=None,
):
if (
isinstance(data, Response)
or isinstance(data, ChatComplete)
or isinstance(data, ChatCompletionChunk)
or isinstance(data, ChatCompletionComplete)
):
data = data.model_dump()
if isinstance(data, List):
data = [d.model_dump() for d in data]
data = self._filter(
data=data, depth=0, path=(), exclude=exclude, matcher=matcher
)
return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"
def matches(
self,
*,
serialized_data,
snapshot_data,
) -> bool:
def convert_data(data):
data = json.loads(data)
if isinstance(data, Dict) and "choices" in data:
choices = data["choices"]
if isinstance(choices, List) and len(choices) >= 1:
if "delta" in choices[0]:
return ChatCompletionChunk(**data)
if "text" in choices[0]:
return Completion(**data)
return ChatComplete(**data)
if isinstance(data, Dict):
return Response(**data)
if isinstance(data, List):
if (
len(data) > 0
and "object" in data[0]
and data[0]["object"] == "text_completion"
):
return [Completion(**d) for d in data]
return [Response(**d) for d in data]
raise NotImplementedError
def eq_token(token: Token, other: Token) -> bool:
return (
token.id == other.id
and token.text == other.text
and (
self.ignore_logprob
or (token.logprob == other.logprob and token.logprob is None)
or math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
)
and token.special == other.special
)
def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
try:
return (
prefill_token.id == other.id
and prefill_token.text == other.text
and (
self.ignore_logprob
or math.isclose(
prefill_token.logprob,
other.logprob,
rel_tol=self.rtol,
)
if prefill_token.logprob is not None
else prefill_token.logprob == other.logprob
)
)
except TypeError:
return False
def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
return (
details.finish_reason == other.finish_reason
and details.generated_tokens == other.generated_tokens
and details.seed == other.seed
and len(details.prefill) == len(other.prefill)
and all(
[
eq_prefill_token(d, o)
for d, o in zip(details.prefill, other.prefill)
]
)
and len(details.tokens) == len(other.tokens)
and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
)
def eq_details(details: Details, other: Details) -> bool:
return (
details.finish_reason == other.finish_reason
and details.generated_tokens == other.generated_tokens
and details.seed == other.seed
and len(details.prefill) == len(other.prefill)
and all(
[
eq_prefill_token(d, o)
for d, o in zip(details.prefill, other.prefill)
]
)
and len(details.tokens) == len(other.tokens)
and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
and (
len(details.best_of_sequences)
if details.best_of_sequences is not None
else 0
)
== (
len(other.best_of_sequences)
if other.best_of_sequences is not None
else 0
)
and (
all(
[
eq_best_of(d, o)
for d, o in zip(
details.best_of_sequences, other.best_of_sequences
)
]
)
if details.best_of_sequences is not None
else details.best_of_sequences == other.best_of_sequences
)
)
def eq_completion(response: Completion, other: Completion) -> bool:
return response.choices[0].text == other.choices[0].text
def eq_chat_complete(response: ChatComplete, other: ChatComplete) -> bool:
return (
response.choices[0].message.content == other.choices[0].message.content
)
def eq_chat_complete_chunk(
response: ChatCompletionChunk, other: ChatCompletionChunk
) -> bool:
return response.choices[0].delta.content == other.choices[0].delta.content
def eq_response(response: Response, other: Response) -> bool:
return response.generated_text == other.generated_text and eq_details(
response.details, other.details
)
serialized_data = convert_data(serialized_data)
snapshot_data = convert_data(snapshot_data)
if not isinstance(serialized_data, List):
serialized_data = [serialized_data]
if not isinstance(snapshot_data, List):
snapshot_data = [snapshot_data]
if isinstance(serialized_data[0], Completion):
return len(snapshot_data) == len(serialized_data) and all(
[eq_completion(r, o) for r, o in zip(serialized_data, snapshot_data)]
)
if isinstance(serialized_data[0], ChatComplete):
return len(snapshot_data) == len(serialized_data) and all(
[eq_chat_complete(r, o) for r, o in zip(serialized_data, snapshot_data)]
)
if isinstance(serialized_data[0], ChatCompletionChunk):
return len(snapshot_data) == len(serialized_data) and all(
[
eq_chat_complete_chunk(r, o)
for r, o in zip(serialized_data, snapshot_data)
]
)
return len(snapshot_data) == len(serialized_data) and all(
[eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
)
class GenerousResponseComparator(ResponseComparator):
# Needed for GPTQ with exllama which has serious numerical fluctuations.
rtol = 0.75
class IgnoreLogProbResponseComparator(ResponseComparator):
ignore_logprob = True
class LauncherHandle:
def __init__(self, port: int):
self.client = AsyncClient(f"http://localhost:{port}", timeout=30)
def _inner_health(self):
raise NotImplementedError
async def health(self, timeout: int = 60):
assert timeout > 0
for _ in range(timeout):
if not self._inner_health():
raise RuntimeError("Launcher crashed")
try:
await self.client.generate("test")
return
except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
time.sleep(1)
raise RuntimeError("Health check failed")
class ContainerLauncherHandle(LauncherHandle):
def __init__(self, docker_client, container_name, port: int):
super(ContainerLauncherHandle, self).__init__(port)
self.docker_client = docker_client
self.container_name = container_name
def _inner_health(self) -> bool:
container = self.docker_client.containers.get(self.container_name)
return container.status in ["running", "created"]
class ProcessLauncherHandle(LauncherHandle):
def __init__(self, process, port: int):
super(ProcessLauncherHandle, self).__init__(port)
self.process = process
def _inner_health(self) -> bool:
return self.process.poll() is None
@pytest.fixture
def response_snapshot(snapshot):
return snapshot.use_extension(ResponseComparator)
@pytest.fixture
def generous_response_snapshot(snapshot):
return snapshot.use_extension(GenerousResponseComparator)
@pytest.fixture
def ignore_logprob_response_snapshot(snapshot):
return snapshot.use_extension(IgnoreLogProbResponseComparator)
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
def launcher(event_loop):
@contextlib.contextmanager
def local_launcher(
model_id: str,
num_shard: Optional[int] = None,
quantize: Optional[str] = None,
trust_remote_code: bool = False,
use_flash_attention: bool = True,
disable_grammar_support: bool = False,
dtype: Optional[str] = None,
revision: Optional[str] = None,
max_input_length: Optional[int] = None,
max_batch_prefill_tokens: Optional[int] = None,
max_total_tokens: Optional[int] = None,
lora_adapters: Optional[List[str]] = None,
cuda_graphs: Optional[List[int]] = None,
):
port = random.randint(8000, 10_000)
master_port = random.randint(10_000, 20_000)
shard_uds_path = (
f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
)
args = [
"text-generation-launcher",
"--model-id",
model_id,
"--port",
str(port),
"--master-port",
str(master_port),
"--shard-uds-path",
shard_uds_path,
]
env = os.environ
if disable_grammar_support:
args.append("--disable-grammar-support")
if num_shard is not None:
args.extend(["--num-shard", str(num_shard)])
if quantize is not None:
args.append("--quantize")
args.append(quantize)
if dtype is not None:
args.append("--dtype")
args.append(dtype)
if revision is not None:
args.append("--revision")
args.append(revision)
if trust_remote_code:
args.append("--trust-remote-code")
if max_input_length:
args.append("--max-input-length")
args.append(str(max_input_length))
if max_batch_prefill_tokens:
args.append("--max-batch-prefill-tokens")
args.append(str(max_batch_prefill_tokens))
if max_total_tokens:
args.append("--max-total-tokens")
args.append(str(max_total_tokens))
if lora_adapters:
args.append("--lora-adapters")
args.append(",".join(lora_adapters))
if cuda_graphs:
args.append("--cuda-graphs")
args.append(",".join(map(str, cuda_graphs)))
print(" ".join(args), file=sys.stderr)
env["LOG_LEVEL"] = "info,text_generation_router=debug"
if not use_flash_attention:
env["USE_FLASH_ATTENTION"] = "false"
with tempfile.TemporaryFile("w+") as tmp:
            # We'll output stdout/stderr to a temporary file. Using a pipe
            # would cause the process to block until stdout is read.
with subprocess.Popen(
args,
stdout=tmp,
stderr=subprocess.STDOUT,
env=env,
) as process:
yield ProcessLauncherHandle(process, port)
process.terminate()
process.wait(60)
tmp.seek(0)
shutil.copyfileobj(tmp, sys.stderr)
if not use_flash_attention:
del env["USE_FLASH_ATTENTION"]
@contextlib.contextmanager
def docker_launcher(
model_id: str,
num_shard: Optional[int] = None,
quantize: Optional[str] = None,
trust_remote_code: bool = False,
use_flash_attention: bool = True,
disable_grammar_support: bool = False,
dtype: Optional[str] = None,
revision: Optional[str] = None,
max_input_length: Optional[int] = None,
max_batch_prefill_tokens: Optional[int] = None,
max_total_tokens: Optional[int] = None,
lora_adapters: Optional[List[str]] = None,
cuda_graphs: Optional[List[int]] = None,
):
port = random.randint(8000, 10_000)
args = ["--model-id", model_id, "--env"]
if disable_grammar_support:
args.append("--disable-grammar-support")
if num_shard is not None:
args.extend(["--num-shard", str(num_shard)])
if quantize is not None:
args.append("--quantize")
args.append(quantize)
if dtype is not None:
args.append("--dtype")
args.append(dtype)
if revision is not None:
args.append("--revision")
args.append(revision)
if trust_remote_code:
args.append("--trust-remote-code")
if max_input_length:
args.append("--max-input-length")
args.append(str(max_input_length))
if max_batch_prefill_tokens:
args.append("--max-batch-prefill-tokens")
args.append(str(max_batch_prefill_tokens))
if max_total_tokens:
args.append("--max-total-tokens")
args.append(str(max_total_tokens))
if lora_adapters:
args.append("--lora-adapters")
args.append(",".join(lora_adapters))
if cuda_graphs:
args.append("--cuda-graphs")
args.append(",".join(map(str, cuda_graphs)))
client = docker.from_env()
container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"
try:
container = client.containers.get(container_name)
container.stop()
container.wait()
except NotFound:
pass
gpu_count = num_shard if num_shard is not None else 1
env = {
"LOG_LEVEL": "info,text_generation_router=debug",
}
if not use_flash_attention:
env["USE_FLASH_ATTENTION"] = "false"
if HF_TOKEN is not None:
env["HF_TOKEN"] = HF_TOKEN
volumes = []
if DOCKER_VOLUME:
volumes = [f"{DOCKER_VOLUME}:/data"]
if DOCKER_DEVICES:
devices = DOCKER_DEVICES.split(",")
visible = os.getenv("ROCR_VISIBLE_DEVICES")
if visible:
env["ROCR_VISIBLE_DEVICES"] = visible
device_requests = []
else:
devices = []
device_requests = [
docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
]
container = client.containers.run(
DOCKER_IMAGE,
command=args,
name=container_name,
environment=env,
auto_remove=False,
detach=True,
device_requests=device_requests,
devices=devices,
volumes=volumes,
ports={"80/tcp": port},
shm_size="1G",
)
yield ContainerLauncherHandle(client, container.name, port)
if not use_flash_attention:
del env["USE_FLASH_ATTENTION"]
try:
container.stop()
container.wait()
except NotFound:
pass
container_output = container.logs().decode("utf-8")
print(container_output, file=sys.stderr)
container.remove()
if DOCKER_IMAGE is not None:
return docker_launcher
return local_launcher
@pytest.fixture(scope="module")
def generate_load():
async def generate_load_inner(
client: AsyncClient,
prompt: str,
max_new_tokens: int,
n: int,
seed: Optional[int] = None,
grammar: Optional[Grammar] = None,
stop_sequences: Optional[List[str]] = None,
) -> List[Response]:
futures = [
client.generate(
prompt,
max_new_tokens=max_new_tokens,
decoder_input_details=True,
seed=seed,
grammar=grammar,
stop_sequences=stop_sequences,
)
for _ in range(n)
]
return await asyncio.gather(*futures)
return generate_load_inner
|
text-generation-inference/integration-tests/conftest.py/0
|
{
"file_path": "text-generation-inference/integration-tests/conftest.py",
"repo_id": "text-generation-inference",
"token_count": 9413
}
| 226
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2323,
"logprob": null,
"text": "Test"
},
{
"id": 1715,
"logprob": -11.34375,
"text": " request"
}
],
"seed": 0,
"tokens": [
{
"id": 13,
"logprob": -2.2539062,
"special": false,
"text": "."
},
{
"id": 578,
"logprob": -0.15563965,
"special": false,
"text": " The"
},
{
"id": 3622,
"logprob": -0.8203125,
"special": false,
"text": " server"
},
{
"id": 706,
"logprob": 0.0,
"special": false,
"text": " has"
},
{
"id": 539,
"logprob": 0.0,
"special": false,
"text": " not"
},
{
"id": 3686,
"logprob": 0.0,
"special": false,
"text": " yet"
},
{
"id": 3288,
"logprob": 0.0,
"special": false,
"text": " sent"
},
{
"id": 904,
"logprob": 0.0,
"special": false,
"text": " any"
},
{
"id": 828,
"logprob": 0.0,
"special": false,
"text": " data"
},
{
"id": 382,
"logprob": -1.5517578,
"special": false,
"text": ".\n\n"
}
],
"top_tokens": null
},
"generated_text": "Test request. The server has not yet sent any data.\n\n"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 981
}
| 227
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 60,
"prefill": [
{
"id": 610,
"logprob": null,
"text": "def"
},
{
"id": 1489,
"logprob": -5.265625,
"text": " print"
},
{
"id": 100,
"logprob": -0.38305664,
"text": "_"
},
{
"id": 7670,
"logprob": -7.640625,
"text": "hello"
}
],
"seed": 0,
"tokens": [
{
"id": 2284,
"logprob": -0.296875,
"special": false,
"text": "():"
},
{
"id": 303,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": -0.26611328,
"special": false,
"text": "Hello"
},
{
"id": 10914,
"logprob": -0.7734375,
"special": false,
"text": " World"
},
{
"id": 16013,
"logprob": -0.61816406,
"special": false,
"text": "!\")"
},
{
"id": 222,
"logprob": -0.054870605,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": -0.4152832,
"special": false,
"text": "def"
},
{
"id": 1489,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7670,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 444,
"logprob": -0.21618652,
"special": false,
"text": "name"
},
{
"id": 45,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 444,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 731,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 303,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 925,
"logprob": -3.3476562,
"special": false,
"text": " %"
},
{
"id": 120,
"logprob": 0.0,
"special": false,
"text": "s"
},
{
"id": 11571,
"logprob": -0.08892822,
"special": false,
"text": "!\""
},
{
"id": 925,
"logprob": 0.0,
"special": false,
"text": " %"
},
{
"id": 655,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 46,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 610,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1489,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7670,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 444,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 400,
"logprob": -0.074279785,
"special": false,
"text": "age"
},
{
"id": 45,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 444,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 49,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 11505,
"logprob": 0.0,
"special": false,
"text": " age"
},
{
"id": 731,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 303,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1489,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 459,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8302,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 925,
"logprob": 0.0,
"special": false,
"text": " %"
},
{
"id": 120,
"logprob": 0.0,
"special": false,
"text": "s"
},
{
"id": 49,
"logprob": -0.07891846,
"special": false,
"text": ","
},
{
"id": 863,
"logprob": 0.0,
"special": false,
"text": " you"
},
{
"id": 904,
"logprob": 0.0,
"special": false,
"text": " are"
},
{
"id": 925,
"logprob": 0.0,
"special": false,
"text": " %"
},
{
"id": 105,
"logprob": 0.0,
"special": false,
"text": "d"
},
{
"id": 11339,
"logprob": 0.0,
"special": false,
"text": " years"
},
{
"id": 3627,
"logprob": 0.0,
"special": false,
"text": " old"
},
{
"id": 11571,
"logprob": 0.0,
"special": false,
"text": "!\""
},
{
"id": 925,
"logprob": 0.0,
"special": false,
"text": " %"
},
{
"id": 327,
"logprob": 0.0,
"special": false,
"text": " ("
},
{
"id": 444,
"logprob": 0.0,
"special": false,
"text": "name"
}
],
"top_tokens": null
},
"generated_text": "():\n print(\"Hello World!\")\n\ndef print_hello_name(name):\n print(\"Hello %s!\" % name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello %s, you are %d years old!\" % (name"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2/test_flash_starcoder2_default_params.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2/test_flash_starcoder2_default_params.json",
"repo_id": "text-generation-inference",
"token_count": 4760
}
| 228
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -0.00756073,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.20117188,
"special": false,
"text": "\n"
},
{
"id": 16114,
"logprob": -1.2597656,
"special": false,
"text": "Once"
},
{
"id": 3714,
"logprob": -0.20825195,
"special": false,
"text": " upon"
},
{
"id": 264,
"logprob": -0.00178051,
"special": false,
"text": " a"
},
{
"id": 727,
"logprob": -0.011955261,
"special": false,
"text": " time"
},
{
"id": 28725,
"logprob": -0.17541504,
"special": false,
"text": ","
},
{
"id": 736,
"logprob": -0.91308594,
"special": false,
"text": " there"
},
{
"id": 403,
"logprob": -0.058410645,
"special": false,
"text": " was"
},
{
"id": 264,
"logprob": -0.009689331,
"special": false,
"text": " a"
}
],
"top_tokens": null
},
"generated_text": "\n\nOnce upon a time, there was a"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_simple.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_simple.json",
"repo_id": "text-generation-inference",
"token_count": 867
}
| 229
|
import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle_sharded(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=2,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq_sharded(flash_llama_awq_handle_sharded):
await flash_llama_awq_handle_sharded.health(300)
return flash_llama_awq_handle_sharded.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_sharded(flash_llama_awq_sharded, response_snapshot):
response = await flash_llama_awq_sharded.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_load_sharded(
flash_llama_awq_sharded, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_awq_sharded, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_awq_sharded.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_awq_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 624
}
| 230
|
import pytest
@pytest.fixture(scope="module")
def flash_neox_handle(launcher):
with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_neox(flash_neox_handle):
await flash_neox_handle.health(300)
return flash_neox_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox(flash_neox, response_snapshot):
response = await flash_neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox, generate_load, response_snapshot):
responses = await generate_load(
flash_neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
assert all(
[text == generated_texts[0] for text in generated_texts]
), generated_texts
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_neox.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_neox.py",
"repo_id": "text-generation-inference",
"token_count": 514
}
| 231
|
import pytest
@pytest.fixture(scope="module")
def mpt_sharded_handle(launcher):
with launcher("mosaicml/mpt-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def mpt_sharded(mpt_sharded_handle):
await mpt_sharded_handle.health(300)
return mpt_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_mpt(mpt_sharded, response_snapshot):
response = await mpt_sharded.generate(
"What is Deep Learning?",
max_new_tokens=17,
decoder_input_details=True,
)
assert response.details.generated_tokens == 17
assert (
response.generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_mpt_load(mpt_sharded, generate_load, response_snapshot):
responses = await generate_load(
mpt_sharded,
"What is Deep Learning?",
max_new_tokens=17,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert (
responses[0].generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_mpt.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_mpt.py",
"repo_id": "text-generation-inference",
"token_count": 541
}
| 232
|
import { check } from 'k6';
import { scenario } from 'k6/execution';
import http from 'k6/http';
import { Trend, Counter } from 'k6/metrics';
const host = __ENV.HOST;
const model_id = __ENV.MODEL_ID;
const timePerToken = new Trend('time_per_token', true);
const tokens = new Counter('tokens');
const new_tokens = new Counter('new_tokens');
const input_tokens = new Counter('input_tokens');
const max_new_tokens = 50;
// const shareGPT = JSON.parse(open("ShareGPT_V3_unfiltered_cleaned_split.json"))
const shareGPT = JSON.parse(open("small.json"))
export function get_options() {
return {
thresholds: {
http_req_failed: ['rate==0'],
// time_per_token: [{
// threshold: `p(50)<${5 * reference_latency_ms}`,
// abortOnFail: true,
// delayAbortEval: '10s'
// }],
},
scenarios: {
// single_user: {
// executor: 'constant-arrival-rate',
// duration: '60s',
// preAllocatedVUs: 1,
// rate: 20,
// timeUnit: '1s',
// },
// load_test: {
// executor: 'constant-arrival-rate',
// duration: '60s',
// preAllocatedVUs: 100,
// rate: 1,
// timeUnit: '1s',
// },
// breakpoint: {
// executor: 'ramping-arrival-rate', //Assure load increase if the system slows
// preAllocatedVUs: 300,
// stages: [
// { duration: '60s', target: 100 }, // just slowly ramp-up to a HUGE load
// ],
// },
throughput: {
executor: 'shared-iterations',
vus: 100,
iterations: 200,
maxDuration: '40s',
},
},
};
}
function generate_payload(gpt, max_new_tokens) {
const input = gpt["conversations"][0]["value"];
return { "messages": [{ "role": "user", "content": input }], "temperature": 0, "model": `${model_id}`, "max_tokens": max_new_tokens }
}
export const options = get_options();
export default function run() {
const headers = { 'Content-Type': 'application/json' };
const query = shareGPT[scenario.iterationInTest % shareGPT.length];
const payload = JSON.stringify(generate_payload(query, max_new_tokens));
const res = http.post(`http://${host}/v1/chat/completions`, payload, {
headers,
});
if (res.status >= 400 && res.status < 500) {
return;
}
check(res, {
'Post status is 200': (res) => res.status === 200,
});
const duration = res.timings.duration;
if (res.status === 200) {
const body = res.json();
const completion_tokens = body.usage.completion_tokens;
const latency_ms_per_token = duration / completion_tokens;
timePerToken.add(latency_ms_per_token);
const prompt_tokens = body.usage.prompt_tokens;
input_tokens.add(prompt_tokens);
new_tokens.add(completion_tokens);
tokens.add(completion_tokens + prompt_tokens);
}
}
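// Usage sketch (not part of the original script; host and model id are placeholders):
//   k6 run -e HOST=127.0.0.1:8080 -e MODEL_ID=my-org/my-model common.js
// HOST must point at a running text-generation-inference instance exposing /v1/chat/completions,
// and small.json must contain ShareGPT-style records, each with a "conversations" list.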
|
text-generation-inference/load_tests/common.js/0
|
{
"file_path": "text-generation-inference/load_tests/common.js",
"repo_id": "text-generation-inference",
"token_count": 1530
}
| 233
|
flash_att_commit := 3a9bfd076f98746c73362328958dbc68d145fbec
build-flash-attention:
if [ ! -d 'flash-attention' ]; then \
pip install -U packaging ninja --no-cache-dir && \
git clone https://github.com/HazyResearch/flash-attention.git; \
fi
cd flash-attention && git fetch && git checkout $(flash_att_commit) && \
MAX_JOBS=8 python setup.py build && cd csrc/layer_norm && python setup.py build && cd ../rotary && python setup.py build
install-flash-attention: build-flash-attention
cd flash-attention && git checkout $(flash_att_commit) && MAX_JOBS=8 python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
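# Usage sketch (assumption, not part of the original Makefile): from the server/ directory,
#   make -f Makefile-flash-att install-flash-attention
# builds and installs the pinned flash-attention commit along with its layer_norm and rotary
# kernels; lower MAX_JOBS in the recipes above if the build exhausts memory.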
|
text-generation-inference/server/Makefile-flash-att/0
|
{
"file_path": "text-generation-inference/server/Makefile-flash-att",
"repo_id": "text-generation-inference",
"token_count": 231
}
| 234
|
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _q4_matmul_cuh
#define _q4_matmul_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>
#include "q4_matrix.cuh"
#include "../tuning.h"
void q4_matmul_cuda
(
ExLlamaTuning* tuningParams,
const half* x,
const int x_height,
const Q4Matrix* w,
half* out,
bool no_zero,
cudaStream_t alt_stream
);
void q4_matmul_recons_cuda
(
ExLlamaTuning* tuningParams,
const half* x,
const int x_height,
Q4Matrix* w,
half* out,
bool no_zero,
const cublasHandle_t handle
);
#endif
|
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh/0
|
{
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh",
"repo_id": "text-generation-inference",
"token_count": 322
}
| 235
|
#include "compat.cuh"
__forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return __hadd2(result, g_result);
}
__forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return __half2float(__low2half(result)) + __half2float(__high2half(result));
}
__forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr)
{
half2 result = {};
const half2* a2_ptr = (const half2*)a_ptr;
#pragma unroll
for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
return result;
}
typedef void (*fp_gemm_half_q_half_gptq_kernel)
(
const half*,
const uint32_t*,
const uint32_t*,
const half*,
half*,
const int,
const int,
const int,
const int,
const int,
const uint16_t*,
const int,
const bool,
const half*,
const int
);
template <int m_count, bool use_r_weights, bool mul_r_weights>
__global__ void gemm_half_q_half_gptq_kernel
(
const half* __restrict__ a,
const uint32_t* __restrict__ b_q_weight,
const uint32_t* __restrict__ b_gptq_qzeros,
const half* __restrict__ b_gptq_scales,
half* __restrict__ c,
const int size_m,
const int size_n,
const int size_k,
const int groups,
const int groupsize,
const uint16_t* __restrict__ b_q_perm,
const int rows_4,
const bool clear,
const half* r_weights,
const int r_weights_stride
)
{
MatrixView_half a_(a, size_m, size_k);
MatrixView_half_rw c_(c, size_m, size_n);
MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n);
MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n);
int t = threadIdx.x;
// Block
int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4;
int offset_m = blockIdx.y * m_count;
int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE;
int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n);
int end_m = min(offset_m + m_count, size_m);
int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k);
int n = offset_n + t * 4;
// Read weights
half_uint16 weights[MAX_Q_GEMM_WEIGHTS];
if constexpr (use_r_weights)
{
uint16_t any_w = 0;
const half* w_ptr = r_weights;
for (int m = 0; m < m_count; ++m)
{
weights[m].as_half = *w_ptr;
w_ptr += r_weights_stride;
any_w |= weights[m].as_uint16;
}
if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!)
}
// Preload block_a
__shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE];
if (offset_k + t < end_k)
{
for (int m = 0; m < m_count; ++m)
{
const half* a_ptr = a_.item_ptr(offset_m + m, 0);
half* block_a_ptr = block_a[m];
half a0;
if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]];
else a0 = a_ptr[offset_k + t];
block_a_ptr[t] = a0;
}
}
// Zero output
if (n >= size_n) return;
if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0)
{
for (int m = 0; m < m_count; m++)
*((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0;
}
__syncthreads();
// Find initial group
int group = offset_k / groupsize;
int nextgroup = offset_k + groupsize;
// a, b offset
int qk = offset_k / (32 / 4);
const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
const half* a_ptr = &block_a[0][0];
int a_stride = GPTQ_BLOCK_KN_SIZE;
// Initial group
int zeros[4];
half2 scales[4];
half2 z1z16[4][2];
half2 y1y16[4][2];
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
// __syncthreads();
// Column result
half2 block_c[m_count][4] = {};
// Dequantize and multiply
int k = offset_k;
while (k < end_k)
{
if (k == nextgroup)
{
group++;
nextgroup += groupsize;
b_gptq_qzeros_.item4(zeros, group, n);
b_gptq_scales_.item4_h2(scales, group, n);
dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]);
dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]);
dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]);
dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]);
}
#pragma unroll
for (int j = 0; j < 4; j++)
{
const int4* b_ptr4 = (int4*) b_ptr;
int4 load_int4 = *b_ptr4;
half2 dq[4][4];
dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false);
dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false);
dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false);
dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false);
#pragma unroll
for (int m = 0; m < m_count; m++)
{
if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]);
block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]);
block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]);
block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]);
}
b_ptr += size_n;
a_ptr += 8;
}
k += 32;
}
for (int m = 0; m < m_count; m++)
{
half2 *out = (half2*) c_.item_ptr(offset_m + m, n);
half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0]));
half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1]));
half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2]));
half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3]));
half2 result01 = __halves2half2(result0, result1);
half2 result23 = __halves2half2(result2, result3);
if constexpr (mul_r_weights)
{
half2 w_mul2 = __half2half2(weights[m].as_half);
result01 = __hmul2(result01, w_mul2);
result23 = __hmul2(result23, w_mul2);
}
atomicAdd(out , result01);
atomicAdd(out + 1, result23);
}
}
template <bool use_r_weights, bool mul_r_weights>
struct map_m_count_gptq {
static constexpr fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(int m_count)
{
#if GPTQ_BLOCK_M_SIZE_MAX >= 1
if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 2
if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 3
if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 4
if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 5
if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 6
if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 7
if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>;
#endif
#if GPTQ_BLOCK_M_SIZE_MAX >= 8
if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>;
#endif
return NULL;
}
};
fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights)
{
if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count);
if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count);
if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count);
if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count);
return NULL;
}
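// Dispatch sketch (hedged; the actual grid/block launch configuration lives in the calling
// .cu file and is not shown here):
//   fp_gemm_half_q_half_gptq_kernel kernel =
//       pick_gemm_half_q_half_gptq_kernel(m_count, r_weights, mul_r_weights);
//   if (kernel)
//       kernel<<<grid, block, 0, stream>>>(a, b_q_weight, b_gptq_qzeros, b_gptq_scales, c,
//                                          size_m, size_n, size_k, groups, groupsize,
//                                          b_q_perm, rows_4, clear, r_weights, r_weights_stride);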
|
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh/0
|
{
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh",
"repo_id": "text-generation-inference",
"token_count": 4839
}
| 236
|
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/adapters/__init__.py
# License: Apache License Version 2.0, January 2004
from text_generation_server.adapters.weights import (
AdapterBatchData,
AdapterBatchMetadata,
)
__all__ = [
"AdapterBatchData",
"AdapterBatchMetadata",
]
|
text-generation-inference/server/text_generation_server/adapters/__init__.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/adapters/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 125
}
| 237
|
# Copied logic from https://github.com/mit-han-lab/llm-awq/blob/f084f40bd996f3cf3a0633c1ad7d9d476c318aaa/awq/quantize/qmodule.py
from typing import Optional
import torch
import torch.nn as nn
import awq_inference_engine # with CUDA kernels
# class ScaledActivation(nn.Module):
# def __init__(self, module, scales):
# super().__init__()
# self.act = module
# self.scales = nn.Parameter(scales.data)
#
# def forward(self, x):
# return self.act(x) / self.scales.view(1, 1, -1).to(x.device)
class WQLinear(nn.Module):
def __init__(
self, w_bit, group_size, qweight, qzeros, scales, bias: Optional[torch.Tensor]
):
super().__init__()
if w_bit not in [4]:
raise NotImplementedError("Only 4-bit are supported for now.")
self.in_features = qweight.shape[0]
self.out_features = qweight.shape[1] * 32 // w_bit
self.w_bit = w_bit
self.group_size = group_size if group_size != -1 else self.in_features
        # quick sanity check (make sure alignment holds)
assert self.in_features % self.group_size == 0
assert self.out_features % (32 // self.w_bit) == 0
self.qweight = qweight
self.qzeros = qzeros
self.scales = scales
self.bias = bias
@torch.no_grad()
def forward(self, x):
out_shape = x.shape[:-1] + (self.out_features,)
out = awq_inference_engine.gemm_forward_cuda(
x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, 8
)
out = out + self.bias if self.bias is not None else out
return out.reshape(out_shape)
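# Shape sketch for the packed tensors consumed above (hedged; inferred from the constructor
# rather than taken from the upstream AWQ reference):
#   qweight: int32 [in_features, out_features * w_bit // 32]  (i.e. [K, N // 8] for 4-bit)
#   qzeros : int32 [in_features // group_size, out_features * w_bit // 32]
#   scales : fp16  [in_features // group_size, out_features]
#   bias   : fp16  [out_features] or None
# e.g. layer = WQLinear(w_bit=4, group_size=128, qweight=qw, qzeros=qz, scales=s, bias=None)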
|
text-generation-inference/server/text_generation_server/layers/awq/quantize/qmodule.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/awq/quantize/qmodule.py",
"repo_id": "text-generation-inference",
"token_count": 750
}
| 238
|
from text_generation_server.layers.marlin.fp8 import GPTQMarlinFP8Linear
from text_generation_server.layers.marlin.gptq import (
GPTQMarlinWeightsLoader,
can_use_gptq_marlin,
repack_gptq_for_marlin,
)
from text_generation_server.layers.marlin.marlin import MarlinWeightsLoader
__all__ = [
"GPTQMarlinFP8Linear",
"GPTQMarlinWeightsLoader",
"MarlinWeightsLoader",
"can_use_gptq_marlin",
"repack_gptq_for_marlin",
]
|
text-generation-inference/server/text_generation_server/layers/marlin/__init__.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/marlin/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 195
}
| 239
|
# coding=utf-8
# Copyright 2024 Cohere team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
reshape_and_cache,
Seqlen,
)
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.layernorm import (
FastLayerNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
if SYSTEM == "cuda":
import dropout_layer_norm
else:
dropout_layer_norm = None
class CohereRotary(PositionRotaryEmbedding):
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
):
        # Such control flow may add some overhead.
if SYSTEM == "cuda":
import rotary_emb
q1 = query[..., ::2]
q2 = query[..., 1::2]
rotary_emb.apply_rotary(q1, q2, cos, sin, q1, q2, False)
k1 = key[..., ::2]
k2 = key[..., 1::2]
rotary_emb.apply_rotary(k1, k2, cos, sin, k1, k2, False)
elif SYSTEM == "rocm":
from vllm._C import ops
            # NOTE: On RoCm systems, we use a RoPE implementation adapted from vLLM which launches a single kernel for both query and key, contrary to the flash-attn implementation used on NVIDIA systems.
            # When compiling the flash-attn rotary kernel on RoCm, hipcc appears unable to unroll loops, resulting in even slower inference than eager mode: https://github.com/pytorch/pytorch/issues/113773
head_size = query.shape[-1]
# Inplace operation, updating query and key.
ops.rotary_embedding(query, key, head_size, cos, sin, False)
elif SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
ipex.llm.functional.rotary_embedding(
query, key, sin, cos, query.size(-1), False
)
else:
raise ValueError(
"Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction."
)
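    # Reference math for the branches above (hedged summary): rotary embedding over interleaved
    # even/odd feature pairs,
    #   (x_even, x_odd) -> (x_even * cos - x_odd * sin, x_even * sin + x_odd * cos),
    # applied in place to both query and key on every supported backend.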
class CohereLayerNorm(nn.Module):
def __init__(self, prefix, weights, eps):
super().__init__()
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
self.weight = nn.Parameter(weight)
        # All-ones "fake" weight passed to the fused dropout_layer_norm kernel below; the real per-head weight is applied afterwards.
self.ones = weight.new_ones(weight.shape[1])
self.eps = eps
def forward(self, hidden_states):
if hidden_states.shape[-1] > 8192 or SYSTEM != "cuda":
hidden_states = hidden_states.reshape(
-1, self.weight.shape[0], self.weight.shape[1]
)
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
hidden_states_minus_mean = hidden_states - mean
variance = hidden_states_minus_mean.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states_minus_mean * torch.rsqrt(variance + self.eps)
hidden_states = self.weight.to(torch.float32) * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states.to(input_dtype)
(
hidden_states,
*rest,
) = dropout_layer_norm.dropout_add_ln_fwd(
hidden_states,
None,
self.ones,
None,
None,
None,
None,
None,
0.0,
self.eps,
1.0,
0,
None,
False,
False,
)
# Required to apply one weight matrix per head
hidden_states = hidden_states.view(
-1, self.weight.shape[0], self.weight.shape[1]
)
hidden_states = self.weight * hidden_states
hidden_states = hidden_states.view(-1, self.weight.shape[1])
return hidden_states
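# Reference math for the norm above (hedged summary): for each head h and feature j,
#   y[h, j] = weight[h, j] * (x[h, j] - mean_h) / sqrt(var_h + eps)
# i.e. a bias-free LayerNorm over the feature dimension with an independent scale per head;
# the fused dropout_layer_norm path computes the same normalization and applies the per-head
# scale afterwards.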
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=config.attention_bias,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
if config.attention_bias:
w = [
weights.get_sharded(f"{p}.bias", dim=0)
for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
]
bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias=bias))
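# Layout sketch for the fused QKV weight built above (hedged): per shard, the output rows are
#   [ Q : num_heads * head_size | K : num_key_value_heads * head_size | V : num_key_value_heads * head_size ]
# which matches the split performed later in FlashCohereAttention.forward.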
class FlashCohereAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = CohereRotary.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.use_qk_norm = config.use_qk_norm
if self.use_qk_norm:
self.q_norm = CohereLayerNorm(
prefix=f"{prefix}.q_norm",
weights=weights,
eps=config.layer_norm_eps,
)
self.k_norm = CohereLayerNorm(
prefix=f"{prefix}.k_norm",
weights=weights,
eps=config.layer_norm_eps,
)
else:
self.q_norm = None
self.k_norm = None
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=config.attention_bias,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
qkv = self.query_key_value(hidden_states)
query, key, value = qkv.split(
[
self.head_size * self.num_heads,
self.head_size * self.num_key_value_heads,
self.head_size * self.num_key_value_heads,
],
dim=1,
)
if self.use_qk_norm:
query = query.reshape(-1, self.head_size)
key = key.reshape(-1, self.head_size)
query = self.q_norm(query.contiguous())
key = self.k_norm(key.contiguous())
query = query.view(-1, self.num_heads, self.head_size)
key = key.view(-1, self.num_key_value_heads, self.head_size)
value = value.view(-1, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, key, cos, sin)
reshape_and_cache(key, value, kv_cache[0], kv_cache[1], slots)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query,
kv_cache[0],
kv_cache[1],
seqlen,
block_tables,
self.softmax_scale,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
)
return self.o_proj(
attn_output.view(-1, self.num_heads * self.head_size), reduce=False
)
class CohereMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=False,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=False,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], reduce=False
)
class FlashCohereLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = FlashCohereAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
self.input_layernorm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.process_group = weights.process_group
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
mlp_output = self.mlp(normed_hidden_states)
output = attn_output + mlp_output
if self.process_group.size() > 1:
torch.distributed.all_reduce(output, group=self.process_group)
return output, res
class FlashCohereModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
FlashCohereLayer(
prefix,
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastLayerNorm.load_no_bias(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_eps
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: torch.Tensor,
max_s: int,
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
        # Avoid indexing in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashCohereForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = FlashCohereModel(prefix, config, weights)
try:
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
except RuntimeError:
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.embed_tokens",
weights=weights,
)
self.logit_scale = config.logit_scale
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.lm_head(hidden_states)
logits *= self.logit_scale
if speculative_logits is not None:
speculative_logits *= self.logit_scale
return logits, speculative_logits
|
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_cohere_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8741
}
| 240
|
# coding=utf-8
# Copyright 2024 Starcoder2 AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
reshape_and_cache,
Seqlen,
)
from text_generation_server.layers import (
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.layernorm import (
FastLayerNorm,
FastRMSNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
class Starcoder2Config(PretrainedConfig):
model_type = "starcoder2"
def __init__(
self,
vocab_size=49152,
hidden_size=3072,
intermediate_size=12288,
num_hidden_layers=30,
num_attention_heads=24,
num_key_value_heads=2,
mlp_type="default",
hidden_act="gelu_pytorch_tanh",
max_position_embeddings=4096,
initializer_range=0.018042,
norm_type="layer_norm",
norm_epsilon=1e-5,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
rope_theta=10000.0,
sliding_window=None,
attention_dropout=0.0,
residual_dropout=0.0,
embedding_dropout=0.0,
use_bias: bool = True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.use_bias = use_bias
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.mlp_type = mlp_type
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.norm_type = norm_type
self.norm_epsilon = norm_epsilon
self.use_cache = use_cache
self.rope_theta = rope_theta
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.embedding_dropout = embedding_dropout
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
def load_attention(config, prefix, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=config.use_bias,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
if config.use_bias:
w = [
weights.get_sharded(f"{p}.bias", dim=0)
for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]
]
bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device)
else:
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias=bias))
class Starcoder2Attention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.max_past = (
config.sliding_window if config.sliding_window is not None else -1
)
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=config.use_bias,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
if prefill_cache_indices is not None:
kv_to_cache = kv[prefill_cache_indices]
else:
kv_to_cache = kv
reshape_and_cache(
kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query,
kv_cache[0],
kv_cache[1],
seqlen,
block_tables,
self.softmax_scale,
window_size_left=self.max_past,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
class Starcoder2MLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Default MLP: a single c_fc projection followed by c_proj (no gate/up fusion here)
self.c_fc = TensorParallelColumnLinear.load(
config,
prefix=f"{prefix}.c_fc",
weights=weights,
bias=config.use_bias,
)
self.c_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.c_proj",
weights=weights,
bias=config.use_bias,
)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
return self.c_proj(hidden_states)
class Starcoder2GatedMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
act = config.hidden_act
self.act = (
ACT2FN[act]
if "gelu" not in act
else lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
)
# Fuse gate and up proj
self.gate_up_proj = TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
weights=weights,
dim=0,
bias=config.use_bias,
)
self.down_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.down_proj",
weights=weights,
bias=config.use_bias,
)
self.intermediate_size = (
config.intermediate_size // weights.process_group.size()
)
def forward(self, hidden_states):
gate_up_states = self.gate_up_proj(hidden_states)
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1])
STARCODER2_NORMALIZATION_CLASSES = {
"layer_norm": FastLayerNorm,
"rms_norm": FastRMSNorm,
}
STARCODER2_MLP_CLASSES = {
"default": Starcoder2MLP,
"gated": Starcoder2GatedMLP,
}
class Starcoder2Layer(nn.Module):
def __init__(self, layer_id, config, weights):
super().__init__()
prefix = f"model.layers.{layer_id}"
self.self_attn = Starcoder2Attention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.mlp = STARCODER2_MLP_CLASSES[config.mlp_type](
prefix=f"{prefix}.mlp", config=config, weights=weights
)
self.input_layernorm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.norm_epsilon
)
self.post_attention_layernorm = STARCODER2_NORMALIZATION_CLASSES[
config.norm_type
].load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.norm_epsilon,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
# faster post attention rms norm
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
mlp_output = self.mlp(normed_attn_res_output)
return mlp_output, attn_res
class Starcoder2Model(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
process_group = weights.process_group
self.tp_rank = process_group.rank()
self.tp_world_size = process_group.size()
self.embed_tokens = TensorParallelEmbedding(
prefix=f"{prefix}.embed_tokens", weights=weights
)
self.layers = nn.ModuleList(
[
Starcoder2Layer(
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load(
prefix=f"{prefix}.norm", weights=weights, eps=config.norm_epsilon
)
self.gradient_checkpointing = False
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, true_max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashStarcoder2ForCausalLM(torch.nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
if not prefix:
prefix = "model"
else:
prefix = f"{prefix}.model"
self.model = Starcoder2Model(prefix, config, weights)
try:
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head",
weights=weights,
)
except RuntimeError:
self.lm_head = SpeculativeHead.load(
config,
prefix=f"{prefix}.embed_tokens",
weights=weights,
)
self.max_past = config.sliding_window
self.max_past_tensor = (
torch.tensor(config.sliding_window, device=weights.device)
if self.max_past is not None
else None
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
true_max_s = max_s
if prefill_cache_indices is not None:
# Slots also need to be sliced as it has the same size as the whole kv tensor
slots = slots[prefill_cache_indices]
elif self.max_past is not None:
# Clamp in decode mode as paged attention requires clamped values whereas the flash attention
# kernel requires the true values
seqlen = seqlen.clamp(max=self.max_past_tensor)
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s,
prefill_cache_indices,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
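# Illustrative sketch (editor addition, not part of the upstream module): the
# `kv_head_mapping` built in `Starcoder2Attention` assigns each query head to the
# key/value head it shares under grouped-query attention. The head counts below
# are hypothetical per-shard values chosen only to show the mapping.
if __name__ == "__main__":
    _num_heads, _num_key_value_heads = 8, 2
    _num_groups = _num_heads // _num_key_value_heads  # 4 query heads per KV head
    _mapping = torch.arange(
        0, _num_key_value_heads, dtype=torch.int32
    ).repeat_interleave(_num_groups)
    print(_mapping)  # tensor([0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.int32)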
|
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8804
}
| 241
|
def load_text_model(prefix, config, weights, name=None):
if config.model_type == "llama":
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
FlashLlamaForCausalLM,
)
return FlashLlamaForCausalLM(prefix, config, weights)
elif config.model_type == "mistral":
from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
FlashMistralForCausalLM,
)
return FlashMistralForCausalLM(prefix, config, weights, name=name)
elif config.model_type == "gemma":
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
FlashGemmaForCausalLM,
)
return FlashGemmaForCausalLM(prefix, config, weights, causal=False)
elif config.model_type == "paligemma":
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
FlashGemmaForCausalLM,
)
return FlashGemmaForCausalLM(prefix, config, weights)
else:
raise RuntimeError(f"Unsupported model type {config.model_type}")
def load_vision_model(prefix, config, weights):
if config.model_type == "clip_vision_model":
from text_generation_server.models.custom_modeling.clip import (
CLIPVisionTransformer,
)
return CLIPVisionTransformer(
prefix=f"{prefix}.vision_model", config=config, weights=weights
)
if config.model_type == "siglip_vision_model":
from text_generation_server.models.custom_modeling.siglip import (
SiglipVisionTransformer,
)
return SiglipVisionTransformer(
prefix="vision_tower.vision_model", config=config, weights=weights
)
else:
raise RuntimeError(f"Unsupported model type {config.model_type}")
|
text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py",
"repo_id": "text-generation-inference",
"token_count": 759
}
| 242
|
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/utils/adapter.py
# License: Apache License Version 2.0, January 2004
import warnings
from dataclasses import dataclass
from functools import lru_cache
from typing import TYPE_CHECKING, Set, Tuple, Optional, List
from safetensors.torch import load_file
from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
from text_generation_server.utils.merges.strategies import merge_adapters
from text_generation_server.utils import hub
from text_generation_server.adapters.lora import LoraConfig
if TYPE_CHECKING:
from text_generation_server.adapters.config import AdapterConfig, ModuleMap
BASE_MODEL_ADAPTER_ID = "__base_model__"
@dataclass
class AdapterInfo:
id: str
path: Optional[str]
@dataclass
class AdapterParameters:
adapter_info: Tuple[AdapterInfo]
weights: Tuple[float]
merge_strategy: NotImplemented
density: float
majority_sign_method: NotImplemented
@dataclass
class AdapterSource:
adapter_id: str
model_id: str
revision: str
def parse_lora_adapters(lora_adapters: Optional[str]) -> List[AdapterInfo]:
if not lora_adapters:
return []
adapter_list = []
for adapter in lora_adapters.split(","):
parts = adapter.strip().split("=")
if len(parts) == 1:
adapter_list.append(AdapterInfo(id=parts[0], path=None))
elif len(parts) == 2:
adapter_list.append(AdapterInfo(id=parts[0], path=parts[1]))
else:
raise ValueError(f"Invalid LoRA adapter format: {adapter}")
return adapter_list
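# Usage sketch (editor addition; the adapter ids and path are hypothetical):
# >>> parse_lora_adapters("org/adapter-a,org/adapter-b=/data/adapter-b")
# [AdapterInfo(id='org/adapter-a', path=None),
#  AdapterInfo(id='org/adapter-b', path='/data/adapter-b')]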
def load_and_merge_adapters(
model_id: str,
adapter_parameters: AdapterParameters,
adapter_index: int,
weight_names: Tuple[str],
trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
if len(adapter_parameters.adapter_info) == 1:
adapter_info = next(iter(adapter_parameters.adapter_info))
return load_module_map(
model_id,
adapter_info.id,
adapter_info.path,
weight_names,
trust_remote_code,
)
adapter_params = AdapterParametersContainer(adapter_parameters, adapter_index)
return _load_and_merge(model_id, adapter_params, weight_names, trust_remote_code)
@dataclass
class AdapterParametersContainer:
adapter_parameters: AdapterParameters
adapter_index: int
def __hash__(self) -> int:
return self.adapter_index
@lru_cache(maxsize=32)
def _load_and_merge(
model_id: str,
adapter_params: AdapterParametersContainer,
weight_names: Tuple[str],
trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
params = adapter_params.adapter_parameters
adapters_to_merge = []
merged_weight_names = set()
tokenizer = None
for adapter in params.adapter_info:
if adapter.id == BASE_MODEL_ADAPTER_ID:
raise ValueError("Base model adapter cannot be merged.")
module_map, adapter_config, adapter_weight_names, adapter_tokenizer = (
load_module_map(
model_id,
adapter.id,
adapter.path,
weight_names,
trust_remote_code,
)
)
adapters_to_merge.append((module_map, adapter_config))
merged_weight_names = merged_weight_names.union(adapter_weight_names)
if tokenizer is None:
tokenizer = adapter_tokenizer
if len(adapters_to_merge) == 0:
raise ValueError("No adapters to merge.")
module_map, adapter_config = merge_adapters(adapters_to_merge, params)
return module_map, adapter_config, merged_weight_names, tokenizer
def check_architectures(
model_id: str,
adapter_id: str,
adapter_config: "AdapterConfig",
trust_remote_code: bool = False,
):
try:
if not adapter_config.base_model_name_or_path:
# Avoid execution latency caused by the network connection retrying for AutoConfig.from_pretrained(None)
return
expected_config = AutoConfig.from_pretrained(
model_id, trust_remote_code=trust_remote_code
)
model_config = AutoConfig.from_pretrained(
adapter_config.base_model_name_or_path, trust_remote_code=trust_remote_code
)
except Exception as e:
warnings.warn(
f"Unable to check architecture compatibility for adapter '{adapter_id}' "
f"against model '{model_id}'. Assuming they are compatible. Error: {e}"
)
return
if model_config.architectures == expected_config.architectures:
warnings.warn(
f"Adapter '{adapter_id}' was not trained on base model '{model_id}'. "
f"If you encounter issues, use --model-id '{adapter_config.base_model_name_or_path}' instead."
)
else:
# TODO(travis): revisit this when we support classification heads which will not use CausalLM
raise ValueError(
f"Adapter '{adapter_id}' is not compatible with model '{model_id}'. "
f"Architectures differ: {model_config.architectures} != {expected_config.architectures}. "
f"Use --model-id '{adapter_config.base_model_name_or_path}' instead."
)
@lru_cache(maxsize=128)
def load_module_map(
model_id: str,
adapter_id: str,
adapter_path: Optional[str],
weight_names: Tuple[str],
trust_remote_code: bool = False,
) -> Tuple["ModuleMap", "AdapterConfig", Set[str], PreTrainedTokenizer]:
revision = "main"
adapter_config = LoraConfig.load(adapter_path or adapter_id, None)
if not adapter_path and adapter_config.base_model_name_or_path != model_id:
check_architectures(model_id, adapter_id, adapter_config, trust_remote_code)
adapter_filenames = (
hub._adapter_weight_files_from_dir(adapter_path, extension=".safetensors")
if adapter_path
else hub._cached_adapter_weight_files(
adapter_id, revision=revision, extension=".safetensors"
)
)
try:
adapter_tokenizer = AutoTokenizer.from_pretrained(
adapter_config.config_path,
trust_remote_code=trust_remote_code,
)
except Exception:
# Adapter does not have a tokenizer, so fall back to the base model tokenizer
adapter_tokenizer = None
# load adapter weights from all shards (should have relatively small memory footprint)
adapter_weights = {}
for filename in adapter_filenames:
adapter_weights.update(load_file(filename))
# map the model weights to the relevant adapter weights (LoRA A and B matrices)
module_map, adapter_weight_names = adapter_config.map_weights_for_model(
adapter_weights, weight_names
)
return module_map, adapter_config, adapter_weight_names, adapter_tokenizer
def get_attn_weights(i, layer):
qkv = layer.self_attn.query_key_value
weights = {}
for k in ["q", "k", "v"]:
key = (i, f"{k}_proj")
value = (f"model.layers.{i}.self_attn.{k}_proj", qkv)
weights[key] = value
weights[(i, "o_proj")] = (
f"model.layers.{i}.self_attn.o_proj",
layer.self_attn.o_proj,
)
return weights
def get_mlp_weights(i, layer):
weights = {}
if hasattr(layer, "mlp"):
mlp = layer.mlp
if hasattr(mlp, "gate_up_proj"):
# handle combined gate_up_proj (e.g., for some LLaMA variants)
weights.update(
{
(i, "gate_proj"): (
f"model.layers.{i}.mlp.gate_proj",
mlp.gate_up_proj,
),
(i, "up_proj"): (f"model.layers.{i}.mlp.up_proj", mlp.gate_up_proj),
}
)
else:
# handle separate gate_proj, up_proj, and down_proj (e.g., for Gemma)
if hasattr(mlp, "gate_proj"):
weights[(i, "gate_proj")] = (
f"model.layers.{i}.mlp.gate_proj",
mlp.gate_proj,
)
if hasattr(mlp, "up_proj"):
weights[(i, "up_proj")] = (f"model.layers.{i}.mlp.up_proj", mlp.up_proj)
if hasattr(mlp, "down_proj"):
weights[(i, "down_proj")] = (
f"model.layers.{i}.mlp.down_proj",
mlp.down_proj,
)
return weights
# build_layer_weight_lookup creates a mapping of model layers to their corresponding
# weight tensors and paths. It builds a dictionary that maps layer identifiers to tuples
# containing the weight tensor path and the actual layer object. This mapping is needed
# for the lora adapter to know which weights to update when applying the adapter.
def build_layer_weight_lookup(model):
if hasattr(model, "language_model"):
m = model.language_model.model
elif hasattr(model, "text_model"):
m = model.text_model.model
else:
m = model.model
layer_weights = {}
for i, layer in enumerate(m.layers):
attn_weights = get_attn_weights(i, layer)
mlp_weights = get_mlp_weights(i, layer)
layer_weights.update(attn_weights)
layer_weights.update(mlp_weights)
lm_head = None
if hasattr(m, "lm_head"):
lm_head = m.lm_head
elif hasattr(model, "lm_head"):
lm_head = model.lm_head
if lm_head:
layer_weights[(0, "lm_head")] = ("lm_head", lm_head)
return layer_weights
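# Structure sketch (editor addition): for a hypothetical decoder-only model,
# `build_layer_weight_lookup` returns keys of the form (layer_index, projection_name)
# mapped to (weight path, module), e.g.
#   (0, "q_proj")    -> ("model.layers.0.self_attn.q_proj", <fused qkv module>)
#   (0, "o_proj")    -> ("model.layers.0.self_attn.o_proj", <o_proj module>)
#   (1, "down_proj") -> ("model.layers.1.mlp.down_proj",    <down_proj module>)
#   (0, "lm_head")   -> ("lm_head",                         <lm_head module>)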
|
text-generation-inference/server/text_generation_server/utils/adapter.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/utils/adapter.py",
"repo_id": "text-generation-inference",
"token_count": 4143
}
| 243
|
# coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from transformers import LogitsProcessor
from typing import List, Union
GAMMA = float(os.getenv("WATERMARK_GAMMA", 0.5))
DELTA = float(os.getenv("WATERMARK_DELTA", 2.0))
class WatermarkLogitsProcessor(LogitsProcessor):
def __init__(
self,
gamma: float = GAMMA,
delta: float = DELTA,
hash_key: int = 15485863, # just a large prime number to create a rng seed with sufficient bit width
device: str = "cpu",
):
# watermarking parameters
self.gamma = gamma
self.delta = delta
self.rng = torch.Generator(device=device)
self.hash_key = hash_key
def _seed_rng(self, input_ids: Union[List[int], torch.LongTensor]):
if isinstance(input_ids, list):
assert (
len(input_ids) >= 1
), "requires at least a 1 token prefix sequence to seed rng"
prev_token = input_ids[-1]
else:
assert len(input_ids) == 1
input_ids = input_ids[0]
assert (
input_ids.shape[-1] >= 1
), "requires at least a 1 token prefix sequence to seed rng"
prev_token = input_ids[-1].item()
self.rng.manual_seed(self.hash_key * prev_token)
def _get_greenlist_ids(
self,
input_ids: Union[List[int], torch.LongTensor],
max_value: int,
device: torch.device,
) -> List[int]:
# seed the rng using the previous tokens/prefix
self._seed_rng(input_ids)
greenlist_size = int(max_value * self.gamma)
vocab_permutation = torch.randperm(max_value, device=device, generator=self.rng)
greenlist_ids = vocab_permutation[:greenlist_size]
return greenlist_ids
@staticmethod
def _calc_greenlist_mask(
scores: torch.FloatTensor, greenlist_token_ids
) -> torch.BoolTensor:
green_tokens_mask = torch.zeros_like(scores)
green_tokens_mask[-1, greenlist_token_ids] = 1
final_mask = green_tokens_mask.bool()
return final_mask
@staticmethod
def _bias_greenlist_logits(
scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float
) -> torch.Tensor:
scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias
return scores
def __call__(
self, input_ids: Union[List[int], torch.LongTensor], scores: torch.FloatTensor
) -> torch.FloatTensor:
greenlist_ids = self._get_greenlist_ids(
input_ids, scores.shape[-1], scores.device
)
green_tokens_mask = self._calc_greenlist_mask(
scores=scores, greenlist_token_ids=greenlist_ids
)
scores = self._bias_greenlist_logits(
scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta
)
return scores
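# Minimal usage sketch (editor addition): bias a toy score tensor toward the
# greenlist. The vocabulary size (10) and the prefix token id (3) are arbitrary
# illustration values.
if __name__ == "__main__":
    processor = WatermarkLogitsProcessor(device="cpu")
    scores = torch.zeros(1, 10)
    biased = processor(input_ids=[3], scores=scores)
    print(biased)  # roughly gamma * 10 of the logits receive a +delta bias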
|
text-generation-inference/server/text_generation_server/utils/watermark.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/utils/watermark.py",
"repo_id": "text-generation-inference",
"token_count": 1489
}
| 244
|
/* eslint-disable @typescript-eslint/no-explicit-any */
import { bertProcessing, byteLevelProcessing, robertaProcessing, sequenceProcessing, templateProcessing } from '../../'
describe('bertProcessing', () => {
it('instantiates correctly with only two parameters', () => {
const processor = bertProcessing(['sep', 1], ['cls', 2])
expect(processor.constructor.name).toEqual('Processor')
})
it('throws if only one argument is provided', () => {
expect(() => (bertProcessing as any)(['sep', 1])).toThrow('Given napi value is not an array')
})
it('throws if arguments are malformed', () => {
expect(() => (bertProcessing as any)(['sep', '1'], ['cls', '2'])).toThrow(
'Failed to convert napi value String into rust type `u32`',
)
expect(() => (bertProcessing as any)(['sep'], ['cls'])).toThrow('Array length < 2')
})
})
describe('byteLevelProcessing', () => {
it('instantiates correctly without any parameter', () => {
const processor = byteLevelProcessing()
expect(processor.constructor.name).toEqual('Processor')
})
it('accepts `undefined` as first parameter', () => {
expect(byteLevelProcessing(undefined)).toBeDefined()
})
it('accepts `boolean` as first parameter', () => {
expect(byteLevelProcessing(true)).toBeDefined()
})
})
describe('robertaProcessing', () => {
it('instantiates correctly with only two parameters', () => {
const processor = robertaProcessing(['sep', 1], ['cls', 2])
expect(processor.constructor.name).toEqual('Processor')
})
it('accepts `undefined` as third and fourth parameters', () => {
expect(robertaProcessing(['sep', 1], ['cls', 2], undefined, undefined)).toBeDefined()
})
it('accepts `boolean` as third and fourth parameter', () => {
expect(robertaProcessing(['sep', 1], ['cls', 2], true, true)).toBeDefined()
})
})
describe('templateProcessing', () => {
it('instantiates correctly with only a single template', () => {
const processor = templateProcessing('$A $A')
expect(processor.constructor.name).toEqual('Processor')
})
it('throws if special tokens are missing', () => {
expect(() => templateProcessing('[CLS] $A [SEP]')).toThrow('Missing SpecialToken(s) with id(s)')
})
it('instantiates correctly with both templates', () => {
const processor = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [
['[CLS]', 1],
['[SEP]', 2],
])
expect(processor.constructor.name).toEqual('Processor')
})
})
describe('sequenceProcessing', () => {
it('accepts `PostProcessor[]` as first parameter', () => {
const template = templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [
['[CLS]', 1],
['[SEP]', 2],
])
const bytelevel = byteLevelProcessing(true)
expect(sequenceProcessing([bytelevel, template])).toBeDefined()
})
})
|
tokenizers/bindings/node/lib/bindings/post-processors.test.ts/0
|
{
"file_path": "tokenizers/bindings/node/lib/bindings/post-processors.test.ts",
"repo_id": "tokenizers",
"token_count": 1022
}
| 245
|
# `tokenizers-linux-arm64-gnu`
This is the **aarch64-unknown-linux-gnu** binary for `tokenizers`
|
tokenizers/bindings/node/npm/linux-arm64-gnu/README.md/0
|
{
"file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/README.md",
"repo_id": "tokenizers",
"token_count": 35
}
| 246
|
use serde::de::Deserializer;
use serde::ser::Serializer;
use serde::{Deserialize, Serialize};
use std::sync::{Arc, RwLock};
pub fn serialize<S, T>(val: &Option<Arc<RwLock<T>>>, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: Serialize,
{
T::serialize(&*(val.clone().unwrap()).read().unwrap(), s)
}
pub fn deserialize<'de, D, T>(d: D) -> Result<Option<Arc<RwLock<T>>>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
Ok(Some(Arc::new(RwLock::new(T::deserialize(d)?))))
}
|
tokenizers/bindings/node/src/arc_rwlock_serde.rs/0
|
{
"file_path": "tokenizers/bindings/node/src/arc_rwlock_serde.rs",
"repo_id": "tokenizers",
"token_count": 220
}
| 247
|
from enum import Enum
from typing import List, Tuple, Union
Offsets = Tuple[int, int]
TextInputSequence = str
"""A :obj:`str` that represents an input sequence """
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
"""A pre-tokenized input sequence. Can be one of:
- A :obj:`List` of :obj:`str`
- A :obj:`Tuple` of :obj:`str`
"""
TextEncodeInput = Union[
TextInputSequence,
Tuple[TextInputSequence, TextInputSequence],
List[TextInputSequence],
]
"""Represents a textual input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.TextInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
"""
PreTokenizedEncodeInput = Union[
PreTokenizedInputSequence,
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
List[PreTokenizedInputSequence],
]
"""Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
"""
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
"""Represents all the possible types of input sequences for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextInputSequence`
- When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
"""
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
"""Represents all the possible types of input for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextEncodeInput`
- When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
"""
class OffsetReferential(Enum):
ORIGINAL = "original"
NORMALIZED = "normalized"
class OffsetType(Enum):
BYTE = "byte"
CHAR = "char"
class SplitDelimiterBehavior(Enum):
REMOVED = "removed"
ISOLATED = "isolated"
MERGED_WITH_PREVIOUS = "merged_with_previous"
MERGED_WITH_NEXT = "merged_with_next"
CONTIGUOUS = "contiguous"
from .tokenizers import (
AddedToken,
Encoding,
NormalizedString,
PreTokenizedString,
Regex,
Token,
Tokenizer,
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
__version__,
)
from .implementations import (
BertWordPieceTokenizer,
ByteLevelBPETokenizer,
CharBPETokenizer,
SentencePieceBPETokenizer,
SentencePieceUnigramTokenizer,
)
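# Editor illustration of the input aliases defined above; the strings are
# placeholders and no tokenizer is constructed here.
if __name__ == "__main__":
    single: TextInputSequence = "Hello world"
    pair: TextEncodeInput = ("Hello world", "How are you?")
    pre_tokenized: PreTokenizedInputSequence = ["Hello", "world"]
    print(single, pair, pre_tokenized)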
|
tokenizers/bindings/python/py_src/tokenizers/__init__.py/0
|
{
"file_path": "tokenizers/bindings/python/py_src/tokenizers/__init__.py",
"repo_id": "tokenizers",
"token_count": 984
}
| 248
|
# Generated content DO NOT EDIT
class PreTokenizer:
"""
Base class for all pre-tokenizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
PreTokenizer will return an instance of this class when instantiated.
"""
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class BertPreTokenizer(PreTokenizer):
"""
BertPreTokenizer
This pre-tokenizer splits tokens on spaces, and also on punctuation.
Each occurrence of a punctuation character will be treated separately.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class ByteLevel(PreTokenizer):
"""
ByteLevel PreTokenizer
This pre-tokenizer takes care of replacing all bytes of the given string
with a corresponding representation, as well as splitting into words.
Args:
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
Set this to :obj:`False` to prevent this `pre_tokenizer` from using
the GPT2 specific regexp for splitting on whitespace.
"""
def __init__(self, add_prefix_space=True, use_regex=True):
pass
@staticmethod
def alphabet():
"""
Returns the alphabet used by this PreTokenizer.
Since the ByteLevel works as its name suggests, at the byte level, it
encodes each byte value to a unique visible character. This means that there is a
total of 256 different characters composing this alphabet.
Returns:
:obj:`List[str]`: A list of characters that compose the alphabet
"""
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class CharDelimiterSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
Args:
delimiter: str:
The delimiter char that will be used to split input
"""
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Digits(PreTokenizer):
"""
This pre-tokenizer splits the input on digits, keeping them in separate tokens
Args:
individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, digits will each be separated as follows::
"Call 123 please" -> "Call ", "1", "2", "3", " please"
If set to False, digits will be grouped as follows::
"Call 123 please" -> "Call ", "123", " please"
"""
def __init__(self, individual_digits=False):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Metaspace(PreTokenizer):
"""
Metaspace pre-tokenizer
This pre-tokenizer replaces any whitespace by the provided replacement character.
It then tries to split on these spaces.
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`):
Controls whether the replacement character should be prepended to the first word so that
`hello` is treated exactly like `say hello`.
Choices: "always", "never", "first". "first" means the replacement is only added to the first
token (relevant when special tokens or other pre-tokenizers are used).
"""
def __init__(self, replacement="_", prepend_scheme="always", split=True):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Punctuation(PreTokenizer):
"""
This pre-tokenizer simply splits on punctuation as individual characters.
Args:
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
"contiguous"
"""
def __init__(self, behavior="isolated"):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Sequence(PreTokenizer):
"""
This pre-tokenizer composes other pre_tokenizers and applies them in sequence
"""
def __init__(self, pretokenizers):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Split(PreTokenizer):
"""
Split PreTokenizer
This versatile pre-tokenizer splits using the provided pattern and
according to the provided behavior. The pattern can be inverted by
making use of the invert flag.
Args:
pattern (:obj:`str` or :class:`~tokenizers.Regex`):
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`.
If you want to use a regex pattern, it has to be wrapped in a `tokenizers.Regex`,
otherwise we consider it a string pattern. For example `pattern="|"`
means you want to split on `|` (imagine a csv file for example), while
`pattern=tokenizers.Regex("1|2")` means you split on either '1' or '2'.
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to invert the pattern.
"""
def __init__(self, pattern, behavior, invert=False):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class UnicodeScripts(PreTokenizer):
"""
This pre-tokenizer splits on characters that belong to different language families
It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
This mimics the SentencePiece Unigram implementation.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Whitespace(PreTokenizer):
"""
This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class WhitespaceSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the whitespace. Works like `.split()`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuples with the pre-tokenized parts and their offsets
"""
pass
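# Usage sketch (editor addition, assuming the compiled `tokenizers` package is
# installed; the expected outputs follow the Whitespace regex and the Digits
# behaviour documented above):
# >>> from tokenizers.pre_tokenizers import Digits, Whitespace
# >>> Whitespace().pre_tokenize_str("Call 123 please")
# [('Call', (0, 4)), ('123', (5, 8)), ('please', (9, 15))]
# >>> Digits(individual_digits=True).pre_tokenize_str("Call 123 please")
# [('Call ', (0, 5)), ('1', (5, 6)), ('2', (6, 7)), ('3', (7, 8)), (' please', (8, 15))]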
|
tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi/0
|
{
"file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi",
"repo_id": "tokenizers",
"token_count": 9665
}
| 249
|
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::type_object::PyTypeInfo;
use std::fmt::{Display, Formatter, Result as FmtResult};
use tokenizers::tokenizer::Result;
#[derive(Debug)]
pub struct PyError(pub String);
impl PyError {
#[allow(dead_code)]
pub fn from(s: &str) -> Self {
PyError(String::from(s))
}
pub fn into_pyerr<T: PyTypeInfo>(self) -> PyErr {
PyErr::new::<T, _>(format!("{}", self))
}
}
impl Display for PyError {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
write!(fmt, "{}", self.0)
}
}
impl std::error::Error for PyError {}
pub struct ToPyResult<T>(pub Result<T>);
impl<T> From<ToPyResult<T>> for PyResult<T> {
fn from(v: ToPyResult<T>) -> Self {
v.0.map_err(|e| exceptions::PyException::new_err(format!("{}", e)))
}
}
impl<T> ToPyResult<T> {
pub fn into_py(self) -> PyResult<T> {
self.into()
}
}
pub(crate) fn deprecation_warning(py: Python<'_>, version: &str, message: &str) -> PyResult<()> {
let deprecation_warning = py.import_bound("builtins")?.getattr("DeprecationWarning")?;
let full_message = format!("Deprecated in {}: {}", version, message);
pyo3::PyErr::warn_bound(py, &deprecation_warning, &full_message, 0)
}
|
tokenizers/bindings/python/src/error.rs/0
|
{
"file_path": "tokenizers/bindings/python/src/error.rs",
"repo_id": "tokenizers",
"token_count": 536
}
| 250
|
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer
class TestBaseTokenizer:
def test_get_set_components(self):
toki = Tokenizer(models.BPE())
toki.normalizer = normalizers.NFC()
toki.pre_tokenizer = pre_tokenizers.ByteLevel()
toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
toki.decoder = decoders.ByteLevel()
tokenizer = BaseTokenizer(toki)
assert isinstance(tokenizer.model, models.BPE)
assert isinstance(tokenizer.normalizer, normalizers.NFC)
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
assert isinstance(tokenizer.post_processor, processors.BertProcessing)
assert isinstance(tokenizer.decoder, decoders.ByteLevel)
tokenizer.model = models.Unigram()
assert isinstance(tokenizer.model, models.Unigram)
tokenizer.normalizer = normalizers.NFD()
assert isinstance(tokenizer.normalizer, normalizers.NFD)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
tokenizer.post_processor = processors.ByteLevel()
assert isinstance(tokenizer.post_processor, processors.ByteLevel)
tokenizer.decoder = decoders.WordPiece()
assert isinstance(tokenizer.decoder, decoders.WordPiece)
|
tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py/0
|
{
"file_path": "tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py",
"repo_id": "tokenizers",
"token_count": 550
}
| 251
|
# Normalizers
<tokenizerslangcontent>
<python>
## BertNormalizer
[[autodoc]] tokenizers.normalizers.BertNormalizer
## Lowercase
[[autodoc]] tokenizers.normalizers.Lowercase
## NFC
[[autodoc]] tokenizers.normalizers.NFC
## NFD
[[autodoc]] tokenizers.normalizers.NFD
## NFKC
[[autodoc]] tokenizers.normalizers.NFKC
## NFKD
[[autodoc]] tokenizers.normalizers.NFKD
## Nmt
[[autodoc]] tokenizers.normalizers.Nmt
## Normalizer
[[autodoc]] tokenizers.normalizers.Normalizer
## Precompiled
[[autodoc]] tokenizers.normalizers.Precompiled
## Replace
[[autodoc]] tokenizers.normalizers.Replace
## Sequence
[[autodoc]] tokenizers.normalizers.Sequence
## Strip
[[autodoc]] tokenizers.normalizers.Strip
## StripAccents
[[autodoc]] tokenizers.normalizers.StripAccents
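A short usage sketch combining `Sequence`, `NFD` and `StripAccents` (editor illustration, not part of the generated reference; the accented sentence is arbitrary):

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents

normalizer = normalizers.Sequence([NFD(), StripAccents()])
print(normalizer.normalize_str("Héllò hôw are ü?"))
# "Hello how are u?"
```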
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
|
tokenizers/docs/source-doc-builder/api/normalizers.mdx/0
|
{
"file_path": "tokenizers/docs/source-doc-builder/api/normalizers.mdx",
"repo_id": "tokenizers",
"token_count": 350
}
| 252
|
🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a
`virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with
Python virtual environments, check out the
`user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__.
Create a virtual environment with the version of Python you're going to use and activate it.
Installation with pip
----------------------------------------------------------------------------------------------------
🤗 Tokenizers can be installed using pip as follows::
pip install tokenizers
Installation from sources
----------------------------------------------------------------------------------------------------
To use this method, you need to have the Rust language installed. You can follow
`the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information.
If you are using a Unix-based OS, the installation should be as simple as running::
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
Or you can easily update it with the following command::
rustup update
Once Rust is installed, we can start retrieving the sources for 🤗 Tokenizers::
git clone https://github.com/huggingface/tokenizers
Then we go into the python bindings folder::
cd tokenizers/bindings/python
At this point you should have your `virtual environment`_ already activated. In order to
compile 🤗 Tokenizers, you need to::
pip install -e .
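Once the build finishes, you can quickly check that the bindings are importable (the
version number printed will of course depend on the sources you checked out)::
python -c "import tokenizers; print(tokenizers.__version__)"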
|
tokenizers/docs/source/installation/python.inc/0
|
{
"file_path": "tokenizers/docs/source/installation/python.inc",
"repo_id": "tokenizers",
"token_count": 384
}
| 253
|
#[macro_use]
extern crate criterion;
use criterion::Criterion;
use std::collections::HashMap;
use std::fs::read_to_string;
use std::time::{Duration, Instant};
use tokenizers::models::unigram::Unigram;
use tokenizers::models::unigram::UnigramTrainer;
pub fn bench_train(c: &mut Criterion) {
let trainer = UnigramTrainer::builder()
.show_progress(false)
.unk_token(Some("<UNK>".into()))
.build()
.unwrap();
let mut model = Unigram::default();
let content = read_to_string("data/small.txt").unwrap();
let mut word_counts = HashMap::new();
content.split_whitespace().for_each(|word| {
// This is important for the test of char vs u8
let word = format!("▁{}", word);
*word_counts.entry(word).or_insert(0) += 1;
});
let sentences: Vec<_> = word_counts
.iter()
.map(|(s, i)| (s.to_owned(), *i))
.collect();
c.bench_function("Unigram Train vocabulary (small)", |b| {
b.iter_custom(|iters| {
let mut duration = Duration::new(0, 0);
for _i in 0..iters {
let sentences = sentences.clone();
let start = Instant::now();
trainer.do_train(sentences, &mut model).unwrap();
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
})
});
let content = read_to_string("data/big.txt").unwrap();
// creating `medium` data, which is the first 25% of `data/big.txt`
let content = String::from(&content[..(content.len() as f64 * 0.25) as usize]);
let mut word_counts = HashMap::new();
content.split_whitespace().for_each(|word| {
// This is important for the test of char vs u8
let word = format!("▁{}", word);
*word_counts.entry(word).or_insert(0) += 1;
});
let sentences: Vec<_> = word_counts
.iter()
.map(|(s, i)| (s.to_owned(), *i))
.collect();
c.bench_function("Unigram Train vocabulary (medium)", |b| {
b.iter_custom(|iters| {
let mut duration = Duration::new(0, 0);
for _i in 0..iters {
let sentences = sentences.clone();
let start = Instant::now();
trainer.do_train(sentences, &mut model).unwrap();
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
})
});
}
criterion_group! {
name = benches_train;
config = Criterion::default().sample_size(10);
targets = bench_train
}
criterion_main!(benches_train);
|
tokenizers/tokenizers/benches/unigram_benchmark.rs/0
|
{
"file_path": "tokenizers/tokenizers/benches/unigram_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 1174
}
| 254
|
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello wasm-pack!</title>
</head>
<body>
<noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
<script src="./bootstrap.js"></script>
</body>
</html>
|
tokenizers/tokenizers/examples/unstable_wasm/www/index.html/0
|
{
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/index.html",
"repo_id": "tokenizers",
"token_count": 110
}
| 255
|
use super::{super::OrderedVocabIter, trainer::BpeTrainer, Error, Pair, Word};
use crate::tokenizer::{Model, Result, Token};
use crate::utils::cache::{Cache, DEFAULT_CACHE_CAPACITY};
use crate::utils::iter::ResultShunt;
use serde_json::Value;
use std::borrow::Cow;
use std::{
collections::HashMap,
fs::File,
io::prelude::*,
io::{BufRead, BufReader},
path::{Path, PathBuf},
};
pub type Vocab = HashMap<String, u32>;
type VocabR = HashMap<u32, String>;
pub type MergeMap = HashMap<Pair, (u32, u32)>;
pub type Merges = Vec<(String, String)>;
struct Config {
files: Option<(String, String)>,
vocab: Vocab,
merges: Merges,
cache_capacity: usize,
dropout: Option<f32>,
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
fuse_unk: bool,
byte_fallback: bool,
ignore_merges: bool,
}
/// A `BpeBuilder` can be used to create a `BPE` model with a custom configuration.
pub struct BpeBuilder {
config: Config,
}
impl Default for BpeBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: HashMap::new(),
merges: vec![],
cache_capacity: DEFAULT_CACHE_CAPACITY,
dropout: None,
unk_token: None,
continuing_subword_prefix: None,
end_of_word_suffix: None,
fuse_unk: false,
byte_fallback: false,
ignore_merges: false,
},
}
}
}
impl BpeBuilder {
/// Constructs a new `BpeBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input files.
#[must_use]
pub fn files(mut self, vocab: String, merges: String) -> Self {
self.config.files = Some((vocab, merges));
self
}
/// Set the vocab (token -> ID) and merges mappings.
#[must_use]
pub fn vocab_and_merges(mut self, vocab: Vocab, merges: Merges) -> Self {
self.config.vocab = vocab;
self.config.merges = merges;
self
}
/// Set the cache's capacity. Set to 0 if you want to disable caching.
#[must_use]
pub fn cache_capacity(mut self, capacity: usize) -> Self {
self.config.cache_capacity = capacity;
self
}
/// Use [dropout](https://arxiv.org/abs/1910.13267) with the model.
#[must_use]
pub fn dropout(mut self, dropout: f32) -> Self {
self.config.dropout = Some(dropout);
self
}
/// Set the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = Some(unk_token);
self
}
/// Set the `continuing_subword_prefix` option.
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.config.continuing_subword_prefix = Some(prefix);
self
}
/// Set the `end_of_word_suffix` option.
#[must_use]
pub fn end_of_word_suffix(mut self, prefix: String) -> Self {
self.config.end_of_word_suffix = Some(prefix);
self
}
/// Set the `fuse_unk` option.
#[must_use]
pub fn fuse_unk(mut self, fuse_unk: bool) -> Self {
self.config.fuse_unk = fuse_unk;
self
}
/// Set the `byte_fallback` option.
#[must_use]
pub fn byte_fallback(mut self, byte_fallback: bool) -> Self {
self.config.byte_fallback = byte_fallback;
self
}
/// Set the `ignore_merges` option.
#[must_use]
pub fn ignore_merges(mut self, ignore_merges: bool) -> Self {
self.config.ignore_merges = ignore_merges;
self
}
/// Returns a `BPE` model that uses the `BpeBuilder`'s configuration.
pub fn build(mut self) -> Result<BPE> {
// Validate dropout.
if let Some(p) = self.config.dropout {
if !(0.0..=1.0).contains(&p) {
return Err(Error::InvalidDropout.into());
}
}
// Read files if necessary
if let Some((vocab, merges)) = self.config.files {
let (v, m) = BPE::read_file(&vocab, &merges)?;
self.config.vocab = v;
self.config.merges = m;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
let cache = match self.config.cache_capacity {
0 => None,
capacity => Some(Cache::new(capacity)),
};
let vocab = self.config.vocab;
let prefix_len = if let Some(prefix) = &self.config.continuing_subword_prefix {
prefix.len()
} else {
0
};
let merge_map: MergeMap = self
.config
.merges
.into_iter()
.enumerate()
.map(|(i, (a, b))| -> Result<(Pair, (u32, u32))> {
let a_id = vocab
.get(&a)
.ok_or_else(|| Error::MergeTokenOutOfVocabulary(a.to_owned()))?;
let b_id = vocab
.get(&b)
.ok_or_else(|| Error::MergeTokenOutOfVocabulary(b.to_owned()))?;
let new_token = format!("{}{}", a, &b[prefix_len..]);
let new_id = vocab
.get(&new_token)
.ok_or(Error::MergeTokenOutOfVocabulary(new_token))?;
Ok(((*a_id, *b_id), (i as u32, *new_id)))
})
.collect::<Result<MergeMap>>()?;
// merges.insert(pair, (rank as u32, *new_id));
Ok(BPE {
vocab,
vocab_r,
merges: merge_map,
cache,
dropout: self.config.dropout,
unk_token: self.config.unk_token,
continuing_subword_prefix: self.config.continuing_subword_prefix,
end_of_word_suffix: self.config.end_of_word_suffix,
fuse_unk: self.config.fuse_unk,
byte_fallback: self.config.byte_fallback,
ignore_merges: self.config.ignore_merges,
})
}
}
/// A [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
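///
/// A minimal usage sketch, where `vocab` and `merges` stand for previously built
/// `Vocab` / `Merges` values and `tokenize` comes from the `Model` trait implemented below:
/// ```ignore
/// let bpe = BPE::new(vocab, merges);
/// let tokens = bpe.tokenize("unrelated")?;
/// ```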
#[derive(PartialEq)]
pub struct BPE {
/// The vocabulary assigns a number to each token.
pub(crate) vocab: Vocab,
/// Reversed vocabulary, to rebuild sentences.
pub(crate) vocab_r: VocabR,
/// Contains the mapping between Pairs and their (rank, new_id).
pub(crate) merges: MergeMap,
/// Contains the cache for optimizing the encoding step.
cache: Option<Cache<String, Word>>,
/// Dropout probability for merges. A value of 0.0 means no dropout (the default). At 1.0,
/// tokenization performs no merges, so the result is just characters.
pub dropout: Option<f32>,
/// The unknown token to be used when we encounter an unknown char
pub unk_token: Option<String>,
/// An optional prefix to use on any subword that exists only behind another one
pub continuing_subword_prefix: Option<String>,
/// An optional suffix to characterize an end-of-word subword
pub end_of_word_suffix: Option<String>,
/// Whether multiple unk tokens get fused
pub fuse_unk: bool,
/// Byte fallback, as in SentencePiece: instead of emitting UNK, emit a token like `"<0x00>"`
/// for each byte of the unknown token
pub byte_fallback: bool,
/// Whether or not to directly output words if they are part of the vocab.
pub ignore_merges: bool,
}
impl std::fmt::Debug for BPE {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("BPE")
.field("dropout", &self.dropout)
.field("unk_token", &self.unk_token)
.field("continuing_subword_prefix", &self.continuing_subword_prefix)
.field("end_of_word_suffix", &self.end_of_word_suffix)
.field("fuse_unk", &self.fuse_unk)
.field("byte_fallback", &self.byte_fallback)
.field("vocab", &self.vocab.len())
.field("merges", &self.merges.len())
.field("ignore_merges", &self.ignore_merges)
.finish()
}
}
impl Default for BPE {
fn default() -> Self {
Self::builder().build().unwrap()
}
}
impl Clone for BPE {
// `Clone` can't be derived because it's not implemented for `Cache`.
// To keep things simple when we clone, the new BPE will start with a fresh cache.
fn clone(&self) -> Self {
let fresh_cache = self.cache.as_ref().map(|cache| cache.fresh());
Self {
vocab: self.vocab.clone(),
vocab_r: self.vocab_r.clone(),
merges: self.merges.clone(),
cache: fresh_cache,
dropout: self.dropout,
unk_token: self.unk_token.clone(),
continuing_subword_prefix: self.continuing_subword_prefix.clone(),
end_of_word_suffix: self.end_of_word_suffix.clone(),
fuse_unk: self.fuse_unk,
byte_fallback: self.byte_fallback,
ignore_merges: self.ignore_merges,
}
}
}
/// Converts the merges strings (for example from `merges.txt` file) with the format
/// "{pair_a} {pair_b}" into the format expected by the BPE struct
pub(crate) fn convert_merges_to_hashmap<I: Iterator<Item = String>>(
iter: I,
_vocab: &Vocab,
) -> Result<Merges> {
let mut merges = vec![];
let lines = iter.filter(|l| !l.starts_with("#version"));
for (rank, line) in lines.enumerate() {
let parts = line.split(' ').collect::<Vec<_>>();
if parts.len() != 2 {
return Err(Error::BadMerges(rank + 1).into());
}
merges.push((parts[0].to_string(), parts[1].to_string()));
}
Ok(merges)
}
impl BPE {
/// Initialize a `BpeBuilder`.
pub fn builder() -> BpeBuilder {
BpeBuilder::new()
}
/// Create a new BPE model with the given vocab and merges.
pub fn new(vocab: Vocab, merges: Merges) -> Self {
Self::builder()
.vocab_and_merges(vocab, merges)
.build()
.unwrap()
}
/// Initialize a BpeBuilder model from vocab and merges files
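///
/// A minimal usage sketch (the file paths are placeholders):
/// ```ignore
/// let bpe = BPE::from_file("vocab.json", "merges.txt")
///     .unk_token("<unk>".to_string())
///     .build()?;
/// ```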
pub fn from_file(vocab: &str, merges: &str) -> BpeBuilder {
Self::builder().files(vocab.to_owned(), merges.to_owned())
}
/// Read the given files to extract the vocab and merges
pub fn read_file(vocab: &str, merges: &str) -> Result<(Vocab, Merges)> {
// Read vocab.json
let vocab_file = File::open(vocab)?;
let mut vocab_file = BufReader::new(vocab_file);
let mut buffer = String::new();
vocab_file.read_to_string(&mut buffer)?;
let json: Value = serde_json::from_str(&buffer)?;
let mut vocab = HashMap::new();
match json {
Value::Object(m) => {
for (token, id) in m {
if let Value::Number(id) = id {
let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32;
vocab.insert(token, id);
}
}
}
_ => return Err(Box::new(Error::BadVocabulary)),
};
// Read merges file
let merge_file = File::open(merges)?;
let merge_file = BufReader::new(merge_file);
let merges = ResultShunt::process(merge_file.lines(), |iter| {
convert_merges_to_hashmap(iter, &vocab)
})??;
Ok((vocab, merges))
}
/// Reset the cache.
pub fn clear_cache(&self) {
if let Some(ref cache) = self.cache {
cache.clear()
}
}
pub fn get_vocab(&self) -> Vocab {
self.vocab.clone()
}
pub fn get_unk_token(&self) -> &Option<String> {
&self.unk_token
}
pub fn get_continuing_subword_prefix(&self) -> &Option<String> {
&self.continuing_subword_prefix
}
fn merge_word(&self, w: &str) -> Result<Word> {
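// Walk the word character by character, mapping each piece to a vocab id (adding the
// `continuing_subword_prefix` / `end_of_word_suffix` where relevant, and falling back to
// byte tokens or the unk token for unknown pieces), then apply the learned merges.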
let mut indices = w.char_indices().map(|(idx, _)| idx).peekable();
let mut word = Word::with_capacity(w.len());
let mut unk: Option<(u32, usize)> = None;
while let Some(i) = indices.next() {
let end = indices.peek();
let is_first = i == 0;
let is_last = end.is_none();
let mut s = if let Some(e) = end {
Cow::Borrowed(&w[i..*e])
} else {
Cow::Borrowed(&w[i..])
};
let byte_len = s.len();
// Add the `continuing_subword_prefix` if relevant
if !is_first {
if let Some(ref prefix) = self.continuing_subword_prefix {
s = format!("{}{}", prefix, s).into()
}
}
// Add the `end_of_word_suffix` if relevant
if is_last {
if let Some(ref suffix) = self.end_of_word_suffix {
s = format!("{}{}", s, suffix).into()
}
}
if let Some(id) = self.vocab.get(s.as_ref()) {
if let Some((unk_id, unk_len)) = unk {
word.add(unk_id, unk_len);
unk = None;
}
word.add(*id, byte_len);
} else {
if self.byte_fallback {
let tokens: Option<Vec<_>> = s
.bytes()
.map(|b| -> Option<&u32> {
let code = format!("<{:#04X}>", b);
self.vocab.get(&code)
})
.collect();
if let Some(tokens) = tokens {
for t in tokens {
word.add(*t, 1);
}
continue;
}
}
if let Some(unk_token) = &self.unk_token {
unk = match (unk, self.fuse_unk) {
(Some((unk_id, unk_len)), true) => {
// Fuse unk
Some((unk_id, unk_len + byte_len))
}
(Some((unk_id, unk_len)), false) => {
// Do not fuse unk, add the previous one
word.add(unk_id, unk_len);
Some((
*self.vocab.get(unk_token).ok_or_else(|| {
Error::UnkTokenOutOfVocabulary(unk_token.to_owned())
})?,
byte_len,
))
}
_ => Some((
*self.vocab.get(unk_token).ok_or_else(|| {
Error::UnkTokenOutOfVocabulary(unk_token.to_owned())
})?,
byte_len,
)),
};
}
}
}
if let Some((unk_id, unk_len)) = unk {
word.add(unk_id, unk_len);
}
word.merge_all(&self.merges, self.dropout);
Ok(word)
}
fn word_to_tokens<'a, 'b: 'a>(&'a self, word: &'b Word) -> impl Iterator<Item = Token> + 'a {
word.get_chars_iter()
.zip(word.get_offsets_iter())
.map(move |(id, offsets)| Token::new(id, self.vocab_r[&id].clone(), offsets))
}
fn tokenize_with_cache(&self, sequence: &str) -> Result<Vec<Token>> {
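// Fast paths first: with `ignore_merges`, a sequence found verbatim in the vocab is
// returned as a single token; otherwise a previously merged `Word` is reused from the
// cache when available.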
if self.ignore_merges {
if let Some(id) = self.vocab.get(sequence) {
return Ok(vec![Token::new(*id, sequence.to_string().clone(), (0, 0))]);
}
}
if let Some(ref hit) = self.cache.as_ref().and_then(|c| c.get(sequence)) {
return Ok(self.word_to_tokens(hit).collect());
}
let word = self.merge_word(sequence)?;
let ret = self.word_to_tokens(&word).collect();
if let Some(ref cache) = self.cache {
cache.set(sequence.to_owned(), word);
}
Ok(ret)
}
}
impl Model for BPE {
type Trainer = BpeTrainer;
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.len()
}
fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> {
if sequence.is_empty() {
return Ok(vec![]);
}
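// Only use the cache when tokenization is deterministic (no dropout, or a dropout of
// 0.0); with dropout enabled, merges are applied probabilistically, so results may
// differ between calls and are not cached.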
if self.dropout.is_none() || self.dropout == Some(0.0) {
self.tokenize_with_cache(sequence)
} else {
let word = self.merge_word(sequence)?;
Ok(self.word_to_tokens(&word).collect())
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{}-vocab.json", name),
None => "vocab.json".to_string(),
};
// Write vocab.json
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter)?;
vocab_file.write_all(serialized.as_bytes())?;
// Write merges.txt
let merges_file_name = match name {
Some(name) => format!("{}-merges.txt", name),
None => "merges.txt".to_string(),
};
let merges_path: PathBuf = [folder, Path::new(merges_file_name.as_str())]
.iter()
.collect();
let mut merges_file = File::create(&merges_path)?;
let mut merges: Vec<(&Pair, &u32)> = self
.merges
.iter()
.map(|(pair, (rank, _))| (pair, rank))
.collect();
merges.sort_unstable_by_key(|k| *k.1);
merges_file.write_all(b"#version: 0.2\n")?;
merges_file.write_all(
&merges
.into_iter()
.flat_map(|(pair, _)| {
format!("{} {}\n", self.vocab_r[&pair.0], self.vocab_r[&pair.1]).into_bytes()
})
.collect::<Vec<_>>()[..],
)?;
Ok(vec![vocab_path, merges_path])
}
fn get_trainer(&self) -> BpeTrainer {
BpeTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
use tempfile::NamedTempFile;
#[test]
fn test_ordered_vocab_iter() {
let vocab_r: VocabR = [
(0, "a".into()),
(1, "b".into()),
(2, "c".into()),
(3, "ab".into()),
]
.iter()
.cloned()
.collect();
let order_vocab_iter = OrderedVocabIter::new(&vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter).unwrap();
assert_eq!(serialized, "{\"a\":0,\"b\":1,\"c\":2,\"ab\":3}");
}
#[test]
fn test_unk_not_fused() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("cc").unwrap();
assert_eq!(
tokens,
vec![
Token::new(0u32, "<unk>".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 2)),
]
);
let tokens = bpe.tokenize("accb").unwrap();
assert_eq!(
tokens,
vec![
Token::new(1u32, "a".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 2)),
Token::new(0u32, "<unk>".into(), (2, 3)),
Token::new(2u32, "b".into(), (3, 4)),
]
);
}
#[test]
fn test_unk_get_fused() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.fuse_unk(true)
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("cc").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 2)),]);
let tokens = bpe.tokenize("accb").unwrap();
assert_eq!(
tokens,
vec![
Token::new(1u32, "a".into(), (0, 1)),
Token::new(0u32, "<unk>".into(), (1, 3)),
Token::new(2u32, "b".into(), (3, 4)),
]
);
}
#[test]
// Test tokenization. With dropout set to 0 tokenization is deterministic,
// so we know exactly what the result should be.
//
// To test this, we'll build a simple model to tokenize the word 'unrelated'.
fn test_tokenize_with_and_without_dropout() {
let vocab: Vocab = [
("u".into(), 0),
("n".into(), 1),
("r".into(), 2),
("e".into(), 3),
("l".into(), 4),
("a".into(), 5),
("t".into(), 6),
("d".into(), 7),
("re".into(), 8),
("at".into(), 9),
("ed".into(), 10),
("un".into(), 11),
("ated".into(), 12),
("rel".into(), 13),
("related".into(), 14),
("unrelated".into(), 15),
]
.iter()
.cloned()
.collect();
let merges: Merges = vec![
("r".to_string(), "e".to_string()),
("a".to_string(), "t".to_string()),
("e".to_string(), "d".to_string()),
("u".to_string(), "n".to_string()),
("at".to_string(), "ed".to_string()),
("re".to_string(), "l".to_string()),
("rel".to_string(), "ated".to_string()),
("un".to_string(), "related".to_string()),
];
let mut bpe = BPE::new(vocab, merges);
// With no dropout:
let tokens = bpe.tokenize("unrelated").unwrap();
assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]);
// With dropout = 0.0 (equivalent to dropout == none)
bpe.dropout = Some(0.0);
let tokens = bpe.tokenize("unrelated").unwrap();
assert_eq!(tokens, vec![Token::new(15u32, "unrelated".into(), (0, 9))]);
// Now set dropout to 1.0. Result should be no merges performed.
bpe.dropout = Some(1.0);
let tokens = bpe.tokenize("unrelated").unwrap();
assert_eq!(
tokens,
vec![
Token::new(0u32, "u".into(), (0, 1)),
Token::new(1u32, "n".into(), (1, 2)),
Token::new(2u32, "r".into(), (2, 3)),
Token::new(3u32, "e".into(), (3, 4)),
Token::new(4u32, "l".into(), (4, 5)),
Token::new(5u32, "a".into(), (5, 6)),
Token::new(6u32, "t".into(), (6, 7)),
Token::new(3u32, "e".into(), (7, 8)),
Token::new(7u32, "d".into(), (8, 9)),
]
);
// Now try with dropout between 0 and 1.
bpe.dropout = Some(0.5);
let tokens = bpe.tokenize("unrelated").unwrap();
assert!(!tokens.is_empty() && tokens.len() <= 9);
}
#[test]
// Ensure `BPE::from_file` works as expected.
fn test_bpe_from_file() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}")
.unwrap();
// Set up merges file.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b").unwrap();
// Make sure we can instantiate a BPE model from the files.
let builder = BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
);
let bpe = builder.build().unwrap();
// Check merges.
assert_eq!(bpe.merges.get(&(0, 1)).unwrap(), &(0u32, 3u32));
// Check vocab.
assert_eq!(bpe.vocab.get("a").unwrap(), &0u32);
assert_eq!(bpe.vocab.get("b").unwrap(), &1u32);
assert_eq!(bpe.vocab.get("c").unwrap(), &2u32);
assert_eq!(bpe.vocab.get("ab").unwrap(), &3u32);
}
#[test]
// Ensure `BpeBuilder` with dropout = 0.0 doesn't error
fn test_bpe_with_dropout_0() {
let bpe = BPE::builder().dropout(0.0).build().unwrap();
assert_eq!(bpe.dropout, Some(0.0));
}
#[test]
// Ensure tokenization works as expected with a `continuing_subword_prefix`.
fn test_bpe_with_continuing_subword_prefix() {
let vocab: Vocab = vec![
("a".to_string(), 0),
("##b".to_string(), 1),
("##c".to_string(), 2),
("ab".to_string(), 3),
("abc".to_string(), 4),
]
.into_iter()
.collect();
let merges = vec![
("a".to_string(), "##b".to_string()),
("ab".to_string(), "##c".to_string()),
];
let bpe = BPE::builder()
.vocab_and_merges(vocab, merges)
.unk_token("[UNK]".to_string())
.continuing_subword_prefix("##".to_string())
.build()
.unwrap();
let res = bpe.tokenize("ab");
assert_eq!(
res.unwrap(),
vec![Token {
id: 3,
value: "ab".to_string(),
offsets: (0, 2)
}]
);
let res = bpe.tokenize("abc");
assert_eq!(
res.unwrap(),
vec![Token {
id: 4,
value: "abc".to_string(),
offsets: (0, 3)
}]
);
}
#[test]
// Ensure `MergeTokenOutOfVocabulary` error is returned when it should be.
fn test_bpe_from_file_merge_token_oov() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all(b"{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}")
.unwrap();
// Set up merges file.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b\na d").unwrap();
// Ensure the result of BPE::from_file is a MergeTokenOutOfVocabulary error.
match BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
)
.build()
{
Ok(_) => unreachable!(),
Err(err) => match err.downcast_ref::<Error>() {
Some(Error::MergeTokenOutOfVocabulary(token)) => {
assert_eq!(*token, String::from("d"))
}
_ => unreachable!(),
},
}
}
#[test]
// Ensure `BadMerges` error is returned when there is an invalid line in the
// merges.txt file.
fn test_bpe_from_file_bad_merges() {
// Set up vocab file.
let mut vocab_file = NamedTempFile::new().unwrap();
vocab_file
.write_all("{\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}".as_bytes())
.unwrap();
// Set up merges file with a bad line.
let mut merges_file = NamedTempFile::new().unwrap();
merges_file.write_all(b"#version: 0.2\na b\nc").unwrap();
// Ensure the result of BPE::from_file is a BadMerges error.
match BPE::from_file(
vocab_file.path().to_str().unwrap(),
merges_file.path().to_str().unwrap(),
)
.build()
{
Ok(_) => unreachable!(),
Err(err) => match err.downcast_ref::<Error>() {
Some(Error::BadMerges(line)) => assert_eq!(*line, 2),
_ => unreachable!(),
},
}
}
#[test]
fn test_bpe_byte_fallback() {
// 0x61 == 'a' in bytes
let vocab: Vocab = [("<unk>".into(), 0), ("<0x61>".into(), 1)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.byte_fallback(true)
.build()
.unwrap();
let tokens = bpe.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = bpe.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "<0x61>".into(), (0, 1)),]);
}
#[test]
fn test_bpe_byte_fallback_newline() {
// 0x0A == '\n' in bytes
let vocab: Vocab = [("<unk>".into(), 0), ("<0x0A>".into(), 1)]
.iter()
.cloned()
.collect();
let bpe = BpeBuilder::default()
.vocab_and_merges(vocab, vec![])
.unk_token("<unk>".to_string())
.byte_fallback(true)
.build()
.unwrap();
let tokens = bpe.tokenize("\n").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "<0x0A>".into(), (0, 1)),]);
}
#[test]
fn test_ignore_merges() {
let vocab: Vocab = [
(".:.:".into(), 0),
("Ġbelirtilen".into(), 1),
(".".into(), 2),
(":".into(), 3),
("bel".into(), 4),
("irtilen".into(), 5),
("Ġ".into(), 6),
(".:".into(), 7),
("belirtilen".into(), 8),
(".:.".into(), 9),
("be".into(), 10),
("l".into(), 11),
("ir".into(), 12),
("ti".into(), 13),
("en".into(), 14),
("irtil".into(), 15),
("irti".into(), 16),
("i".into(), 17),
("r".into(), 18),
("t".into(), 19),
("b".into(), 20),
("e".into(), 21),
("n".into(), 22),
]
.iter()
.cloned()
.collect();
let mut bpe = BpeBuilder::default()
.vocab_and_merges(
vocab,
vec![
(".".into(), ":".into()),
("b".into(), "e".into()),
("be".into(), "l".into()),
("i".into(), "r".into()),
("t".into(), "i".into()),
("ir".into(), "ti".into()),
("e".into(), "n".into()),
("irti".into(), "l".into()),
],
)
.ignore_merges(true)
.build()
.unwrap();
let tokens = bpe.tokenize(".:.:").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, ".:.:".into(), (0, 0))]);
let tokens = bpe.tokenize("Ġbelirtilen").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "Ġbelirtilen".into(), (0, 0))]);
bpe.ignore_merges = false;
let tokens = bpe.tokenize(".:.:").unwrap();
assert_eq!(
tokens,
vec![
Token::new(7u32, ".:".into(), (0, 2)),
Token::new(7u32, ".:".into(), (2, 4))
]
);
let tokens = bpe.tokenize("Ġbelirtilen").unwrap();
assert_eq!(
tokens,
vec![
Token {
id: 6,
value: "Ġ".into(),
offsets: (0, 2)
},
Token {
id: 4,
value: "bel".into(),
offsets: (2, 5)
},
Token {
id: 15,
value: "irtil".into(),
offsets: (5, 10)
},
Token {
id: 14,
value: "en".into(),
offsets: (10, 12)
}
]
)
}
}
|
tokenizers/tokenizers/src/models/bpe/model.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/models/bpe/model.rs",
"repo_id": "tokenizers",
"token_count": 17495
}
| 256
|
use super::WordPiece;
use crate::models::bpe::{BpeTrainer, BpeTrainerBuilder, BPE};
use crate::tokenizer::{AddedToken, Result, Trainer};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
/// A `WordPieceTrainerBuilder` can be used to create a `WordPieceTrainer` with a custom
/// configuration.
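///
/// A minimal sketch (the values are illustrative) of configuring a trainer through the builder:
/// ```ignore
/// let trainer = WordPieceTrainer::builder()
///     .vocab_size(30_000)
///     .min_frequency(2)
///     .show_progress(false)
///     .build();
/// ```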
pub struct WordPieceTrainerBuilder {
bpe_trainer_builder: BpeTrainerBuilder,
}
impl Default for WordPieceTrainerBuilder {
fn default() -> Self {
Self {
bpe_trainer_builder: BpeTrainerBuilder::new().continuing_subword_prefix("##".into()),
}
}
}
impl WordPieceTrainerBuilder {
/// Constructs a new `WordPieceTrainerBuilder`
pub fn new() -> Self {
Self::default()
}
/// Set the expected minimum frequency
#[must_use]
pub fn min_frequency(mut self, frequency: u64) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.min_frequency(frequency);
self
}
/// Set the vocabulary size
#[must_use]
pub fn vocab_size(mut self, size: usize) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.vocab_size(size);
self
}
/// Set whether to show progress
#[must_use]
pub fn show_progress(mut self, show: bool) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.show_progress(show);
self
}
/// Set the special tokens
#[must_use]
pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.special_tokens(tokens);
self
}
/// Set whether to limit the alphabet
#[must_use]
pub fn limit_alphabet(mut self, limit: usize) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.limit_alphabet(limit);
self
}
/// Set the initial alphabet
#[must_use]
pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.initial_alphabet(alphabet);
self
}
/// Set the continuing_subword_prefix
#[must_use]
pub fn continuing_subword_prefix(mut self, prefix: String) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.continuing_subword_prefix(prefix);
self
}
/// Set the end_of_word_suffix
#[must_use]
pub fn end_of_word_suffix(mut self, suffix: String) -> Self {
self.bpe_trainer_builder = self.bpe_trainer_builder.end_of_word_suffix(suffix);
self
}
/// Constructs the final WordPieceTrainer
pub fn build(self) -> WordPieceTrainer {
let bpe_trainer = self.bpe_trainer_builder.build();
WordPieceTrainer { bpe_trainer }
}
}
/// Trains a `WordPiece` model.
#[derive(Default, Clone, Deserialize, Serialize)]
pub struct WordPieceTrainer {
bpe_trainer: BpeTrainer,
}
impl WordPieceTrainer {
pub fn min_frequency(&self) -> u64 {
self.bpe_trainer.min_frequency
}
pub fn set_min_frequency(&mut self, freq: u64) {
self.bpe_trainer.min_frequency = freq;
}
pub fn vocab_size(&self) -> usize {
self.bpe_trainer.vocab_size
}
pub fn set_vocab_size(&mut self, size: usize) {
self.bpe_trainer.vocab_size = size;
}
pub fn show_progress(&self) -> bool {
self.bpe_trainer.show_progress
}
pub fn set_show_progress(&mut self, show_progress: bool) {
self.bpe_trainer.show_progress = show_progress;
}
pub fn special_tokens(&self) -> &[AddedToken] {
&self.bpe_trainer.special_tokens
}
pub fn set_special_tokens(&mut self, special_tokens: Vec<AddedToken>) {
self.bpe_trainer.special_tokens = special_tokens;
}
pub fn limit_alphabet(&self) -> Option<usize> {
self.bpe_trainer.limit_alphabet
}
pub fn set_limit_alphabet(&mut self, limit: Option<usize>) {
self.bpe_trainer.limit_alphabet = limit;
}
pub fn initial_alphabet(&self) -> &HashSet<char> {
&self.bpe_trainer.initial_alphabet
}
pub fn set_initial_alphabet(&mut self, alphabet: HashSet<char>) {
self.bpe_trainer.initial_alphabet = alphabet;
}
pub fn continuing_subword_prefix(&self) -> &Option<String> {
&self.bpe_trainer.continuing_subword_prefix
}
pub fn set_continuing_subword_prefix(&mut self, prefix: Option<String>) {
self.bpe_trainer.continuing_subword_prefix = prefix;
}
pub fn end_of_word_suffix(&self) -> &Option<String> {
&self.bpe_trainer.end_of_word_suffix
}
pub fn set_end_of_word_suffix(&mut self, suffix: Option<String>) {
self.bpe_trainer.end_of_word_suffix = suffix;
}
pub fn builder() -> WordPieceTrainerBuilder {
WordPieceTrainerBuilder::default()
}
pub fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> {
let mut bpe = BPE::default();
let special_tokens = self.bpe_trainer.train(&mut bpe)?;
let new_wordpiece = WordPiece::from_bpe(&bpe);
// Transfer the vocab
model.vocab = new_wordpiece.vocab;
model.vocab_r = new_wordpiece.vocab_r;
// The continuing_subword_prefix is the only other option to be overridden by the trainer
model.continuing_subword_prefix = new_wordpiece.continuing_subword_prefix;
Ok(special_tokens)
}
}
impl Trainer for WordPieceTrainer {
type Model = WordPiece;
fn train(&self, model: &mut WordPiece) -> Result<Vec<AddedToken>> {
self.train(model)
}
fn should_show_progress(&self) -> bool {
self.bpe_trainer.should_show_progress()
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> Result<Vec<String>> + Sync,
{
self.bpe_trainer.feed(iterator, process)
}
}
|
tokenizers/tokenizers/src/models/wordpiece/trainer.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/models/wordpiece/trainer.rs",
"repo_id": "tokenizers",
"token_count": 2499
}
| 257
|
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Punctuation {
#[serde(default = "default_split")]
behavior: SplitDelimiterBehavior,
}
fn default_split() -> SplitDelimiterBehavior {
SplitDelimiterBehavior::Isolated
}
impl Punctuation {
pub fn new(behavior: SplitDelimiterBehavior) -> Self {
Self { behavior }
}
}
impl Default for Punctuation {
fn default() -> Self {
Self::new(SplitDelimiterBehavior::Isolated)
}
}
impl PreTokenizer for Punctuation {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(is_punc, self.behavior))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn punctuation_basic() {
let pretok = Punctuation::default();
let mut pretokenized: PreTokenizedString = "Hey friend!     How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey friend", (0, 10)),
("!", (10, 11)),
(" How are you", (11, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn deserialization() {
let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap();
assert_eq!(punctuation, Punctuation::default());
assert_eq!(
punctuation,
Punctuation::new(SplitDelimiterBehavior::Isolated)
);
}
#[test]
#[should_panic]
fn deserialization_erroneous() {
let _punctuation: Punctuation =
serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap();
}
}
|
tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs",
"repo_id": "tokenizers",
"token_count": 1102
}
| 258
|
use crate::utils::SysRegex;
use crate::{Offsets, Result};
use regex::Regex;
/// Pattern used to split a NormalizedString
pub trait Pattern {
/// Slice the given string in a list of pattern match positions, with
/// a boolean indicating whether this is a match or not.
///
/// This method *must* cover the whole string in its outputs, with
/// contiguous ordered slices.
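///
/// A minimal sketch using the `char` implementation below (mirroring the tests at the
/// bottom of this file):
/// ```ignore
/// let splits = 'a'.find_matches("aba")?;
/// assert_eq!(splits, vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
/// ```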
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>;
}
impl Pattern for char {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let is_char = |c: char| -> bool { c == *self };
is_char.find_matches(inside)
}
}
impl Pattern for &str {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if self.is_empty() {
// If we try to find the matches with an empty string, just don't match anything
return Ok(vec![((0, inside.chars().count()), false)]);
}
let re = Regex::new(&regex::escape(self))?;
(&re).find_matches(inside)
}
}
impl Pattern for &String {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let s: &str = self;
s.find_matches(inside)
}
}
impl Pattern for &Regex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for m in self.find_iter(inside) {
if prev != m.start() {
splits.push(((prev, m.start()), false));
}
splits.push(((m.start(), m.end()), true));
prev = m.end();
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl Pattern for &SysRegex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for (start, end) in self.find_iter(inside) {
if prev != start {
splits.push(((prev, start), false));
}
splits.push(((start, end), true));
prev = end;
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl<F> Pattern for F
where
F: Fn(char) -> bool,
{
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut last_offset = 0;
let mut last_seen = 0;
let mut matches = inside
.char_indices()
.flat_map(|(b, c)| {
last_seen = b + c.len_utf8();
if self(c) {
let mut events = Vec::with_capacity(2);
if last_offset < b {
// We need to emit what was before this match
events.push(((last_offset, b), false));
}
events.push(((b, b + c.len_utf8()), true));
last_offset = b + c.len_utf8();
events
} else {
vec![]
}
})
.collect::<Vec<_>>();
// Do not forget the last potential split
if last_seen > last_offset {
matches.push(((last_offset, last_seen), false));
}
Ok(matches)
}
}
/// Invert the `is_match` flags for the wrapped Pattern. This is useful
/// for example when we use a regex that matches words instead of a delimiter,
/// and we want to match the delimiter.
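///
/// A minimal sketch, inverting the `char` pattern from the example above:
/// ```ignore
/// let splits = Invert('a').find_matches("aba")?;
/// assert_eq!(splits, vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]);
/// ```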
pub struct Invert<P: Pattern>(pub P);
impl<P: Pattern> Pattern for Invert<P> {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
Ok(self
.0
.find_matches(inside)?
.into_iter()
.map(|(offsets, flag)| (offsets, !flag))
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
use regex::Regex;
macro_rules! do_test {
($inside: expr, $pattern: expr => @ERROR) => {
assert!($pattern.find_matches($inside).is_err());
};
($inside: expr, $pattern: expr => $result: expr) => {
assert_eq!($pattern.find_matches($inside).unwrap(), $result);
assert_eq!(
Invert($pattern).find_matches($inside).unwrap(),
$result
.into_iter()
.map(|v: (Offsets, bool)| (v.0, !v.1))
.collect::<Vec<_>>()
);
};
}
#[test]
fn char() {
do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", 'a' => vec![((0, 0), false)]);
do_test!("aaa", 'b' => vec![((0, 3), false)]);
}
#[test]
fn str() {
do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]);
do_test!("aabbab", "ab" =>
vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)]
);
do_test!("", "" => vec![((0, 0), false)]);
do_test!("aaa", "" => vec![((0, 3), false)]);
do_test!("aaa", "b" => vec![((0, 3), false)]);
}
#[test]
fn functions() {
let is_b = |c| c == 'b';
do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]);
do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]);
do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", is_b => vec![((0, 0), false)]);
do_test!("aaa", is_b => vec![((0, 3), false)]);
}
#[test]
fn regex() {
let is_whitespace = Regex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
#[test]
fn sys_regex() {
let is_whitespace = SysRegex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
}
|
tokenizers/tokenizers/src/tokenizer/pattern.rs/0
|
{
"file_path": "tokenizers/tokenizers/src/tokenizer/pattern.rs",
"repo_id": "tokenizers",
"token_count": 3903
}
| 259
|
#![cfg(feature = "http")]
use tokenizers::{FromPretrainedParameters, Result, Tokenizer};
#[test]
fn test_from_pretrained() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "there", "dear", "friend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_revision() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["hey", "there", "dear", "friend", "!"]
);
let tokenizer = Tokenizer::from_pretrained(
"anthony/tokenizers-test",
Some(FromPretrainedParameters {
revision: "gpt-2".to_string(),
..Default::default()
}),
)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_invalid_model() {
let tokenizer = Tokenizer::from_pretrained("docs?", None);
assert!(tokenizer.is_err());
}
#[test]
fn test_from_pretrained_invalid_revision() {
let tokenizer = Tokenizer::from_pretrained(
"bert-base-cased",
Some(FromPretrainedParameters {
revision: "gpt?".to_string(),
..Default::default()
}),
);
assert!(tokenizer.is_err());
}
|
tokenizers/tokenizers/tests/from_pretrained.rs/0
|
{
"file_path": "tokenizers/tokenizers/tests/from_pretrained.rs",
"repo_id": "tokenizers",
"token_count": 683
}
| 260
|