text stringlengths 5 631k | id stringlengths 14 178 | metadata dict | __index_level_0__ int64 0 647 |
|---|---|---|---|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DeepseekVL model."""
import re
import tempfile
import unittest
from transformers import (
AutoProcessor,
DeepseekVLConfig,
DeepseekVLForConditionalGeneration,
DeepseekVLModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
class DeepseekVLModelTester:
    """Builds a tiny `DeepseekVLConfig` plus random text/vision inputs for the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=25,
        num_channels=3,
        initializer_range=0.02,
        is_training=True,
        use_cache=False,
        text_config={
            "num_hidden_layers": 2,
            "vocab_size": 99,
            "hidden_size": 16,
            "intermediate_size": 37,
            "max_position_embeddings": 512,
            "num_attention_heads": 4,
            "pad_token_id": 1,
        },
        vision_config={
            "num_hidden_layers": 1,
            "hidden_size": 16,
            "intermediate_size": 37,
            "image_size": 32,
            "patch_size": 8,
            "hidden_act": "gelu",
            "vision_use_head": False,
            "num_attention_heads": 4,
        },
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.num_channels = num_channels
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.use_cache = use_cache
        # The config dicts above are *shared* default arguments (Python evaluates
        # defaults once per process). Copy them before storing/mutating so that
        # writing `num_channels` into one tester cannot leak into every tester
        # constructed afterwards.
        self.text_config = dict(text_config)
        self.vision_config = {**vision_config, "num_channels": num_channels}
        self.num_hidden_layers = self.text_config["num_hidden_layers"]
        self.vocab_size = self.text_config["vocab_size"]
        self.hidden_size = self.text_config["hidden_size"]
        self.num_attention_heads = self.text_config["num_attention_heads"]
        self.image_size = self.vision_config["image_size"]
        # number of placeholder image positions written at the start of each sequence
        self.num_image_tokens = 16
        self.pad_token_id = self.text_config["pad_token_id"]
        self.image_token_id = 0

    def get_config(self):
        """Return a `DeepseekVLConfig` assembled from the tester's sub-configs."""
        return DeepseekVLConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            image_token_id=self.image_token_id,
        )

    def prepare_config_and_inputs(self):
        """Create the config plus random `input_ids`, `attention_mask` and `pixel_values`."""
        config = self.get_config()
        # create text and vision inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_channels,
                self.image_size,
                self.image_size,
            ]
        )
        # fill image_tokens
        # NOTE(review): this masks tokens equal to `num_image_tokens` (16), not
        # `image_token_id`; presumably defensive against a collision — confirm intent.
        input_ids[input_ids == self.num_image_tokens] = config.text_config.pad_token_id
        input_ids[:, : self.num_image_tokens] = self.image_token_id
        return config, input_ids, attention_mask, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` in the shape `ModelTesterMixin` expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, pixel_values = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeepseekVLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """Runs the shared modeling and generation test suites against tiny DeepseekVL models."""

    all_model_classes = (DeepseekVLModel, DeepseekVLForConditionalGeneration) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": DeepseekVLModel,
            "image-text-to-text": DeepseekVLForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    # DeepseekVL is built from separate vision and language submodels.
    _is_composite = True
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeepseekVLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeepseekVLConfig, has_text_modality=False)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
    def test_inputs_embeds(self):
        """Each model must accept precomputed `inputs_embeds` in place of `input_ids`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            # drop pixel_values: when embeddings are passed directly, no image
            # features should be merged into the sequence
            del inputs["pixel_values"]

            wte = model.get_input_embeddings()
            inputs["inputs_embeds"] = wte(input_ids)

            with torch.no_grad():
                model(**inputs)

    # overwrite inputs_embeds tests because we need to delete "pixel values" for VLMs.
    def test_inputs_embeds_matches_input_ids(self):
        """Forwarding `inputs_embeds` must produce the same output as the matching `input_ids`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = self._prepare_for_class(inputs_dict, model_class)

            input_ids = inputs["input_ids"]
            del inputs["input_ids"]
            del inputs["pixel_values"]

            inputs_embeds = model.get_input_embeddings()(input_ids)

            with torch.no_grad():
                out_ids = model(input_ids=input_ids, **inputs)[0]
                out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
            torch.testing.assert_close(out_embeds, out_ids)

    @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation")
    # Copied from tests.models.siglip.test_modeling_siglip.SiglipVisionModelTest.test_initialization
    def test_initialization(self):
        pass

    # Copied from tests.models.janus.test_modeling_janus.JanusVisionText2TextModelTest.test_sdpa_can_dispatch_composite_models
    def test_sdpa_can_dispatch_composite_models(self):
        """Save/reload each model and check SDPA vs. eager attention is dispatched to both submodels."""
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                # Load the model with SDPA
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)

                # Load model with eager attention
                model_eager = model_class.from_pretrained(
                    tmpdirname,
                    attn_implementation="eager",
                )
                model_eager = model_eager.eval().to(torch_device)

            # SigLip has one shared cls attr for all models, so we assign both submodels here
            vision_attn = language_attn = "sdpa" if model._supports_sdpa else "eager"

            if hasattr(model_sdpa, "vision_model") and hasattr(model_sdpa, "language_model"):
                self.assertTrue(model_sdpa.vision_model.config._attn_implementation == vision_attn)
                self.assertTrue(model_sdpa.language_model.config._attn_implementation == language_attn)
                self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")

            self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
            self.assertTrue(model_eager.config._attn_implementation == "eager")

            # every attention submodule (excluding *AttentionPool) must report the
            # implementation it was loaded with
            for name, submodule in model_eager.named_modules():
                class_name = submodule.__class__.__name__
                if any(re.finditer(r"Attention(?!Pool)", class_name)):
                    self.assertTrue(submodule.config._attn_implementation == "eager")

            for name, submodule in model_sdpa.named_modules():
                class_name = submodule.__class__.__name__
                if any(re.finditer(r"Attention(?!Pool)", class_name)):
                    self.assertTrue(submodule.config._attn_implementation == "sdpa")
@require_torch
@require_torch_accelerator
@slow
class DeepseekVLIntegrationTest(unittest.TestCase):
    """Slow generation tests against the released deepseek-vl-1.3b-chat checkpoint.

    Requires an accelerator plus network access (hub checkpoint and test images);
    each test pins the exact greedy-decoding transcript.
    """

    def setUp(self):
        # checkpoint shared by every test in this class
        self.model_id = "deepseek-community/deepseek-vl-1.3b-chat"

    def test_model_text_generation(self):
        """Greedy generation for a single image+text prompt must match the pinned transcript."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)

        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                    },
                    {"type": "text", "text": "Describe this image."},
                ],
            }
        ]
        EXPECTED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:In the image, a majestic snow leopard is captured in a moment of tranquility. The snow leopard' # fmt: skip

        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )

    def test_model_text_generation_batched(self):
        """Batched (padded) generation over two prompts must match the pinned transcripts."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)

        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "Describe this image."},
                    ],
                }
            ],
            [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
                        },
                        {"type": "text", "text": "What animal do you see in the image?"},
                    ],
                }
            ],
        ]
        EXPECTED_TEXT = [
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:In the image, a majestic snow leopard is captured in a moment of tranquility. The snow leopard", # fmt: skip
            "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What animal do you see in the image?\n\nAssistant:I see a bear in the image.What is the significance of the color red in the", # fmt: skip
        ]

        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, padding=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, text)

    def test_model_text_generation_with_multi_image(self):
        """Generation with two interleaved images in one prompt must match the pinned transcript."""
        model = DeepseekVLForConditionalGeneration.from_pretrained(self.model_id, dtype="auto", device_map="auto")
        model.to(torch_device)
        model.eval()
        processor = AutoProcessor.from_pretrained(self.model_id)

        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's the difference between"},
                    {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
                    {"type": "text", "text": " and "},
                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
                ],
            }
        ]
        EXPECTED_TEXT = "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: What's the difference between and \n\nAssistant:The image is a photograph featuring two cats lying on a pink blanket. The cat on the left is" # fmt: skip

        inputs = processor.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        )
        inputs = inputs.to(model.device, dtype=model.dtype)
        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        text = processor.decode(output[0], skip_special_tokens=True)
        self.assertEqual(
            text,
            EXPECTED_TEXT,
        )
| transformers/tests/models/deepseek_vl/test_modeling_deepseek_vl.py/0 | {
"file_path": "transformers/tests/models/deepseek_vl/test_modeling_deepseek_vl.py",
"repo_id": "transformers",
"token_count": 6685
} | 557 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch DepthPro model."""
import unittest
import pytest
from transformers import DepthProConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import DepthProForDepthEstimation, DepthProModel
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
if is_vision_available():
from PIL import Image
from transformers import DepthProImageProcessor
class DepthProModelTester:
    """Builds a tiny `DepthProConfig` plus random pixel inputs for the unit tests."""

    def __init__(
        self,
        parent,
        batch_size=8,
        image_size=64,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        fusion_hidden_size=16,
        intermediate_hook_ids=[1, 0],
        intermediate_feature_dims=[10, 8],
        scaled_images_ratios=[0.5, 1.0],
        scaled_images_overlap_ratios=[0.0, 0.2],
        scaled_images_feature_dims=[12, 12],
        initializer_range=0.02,
        use_fov_model=False,
        image_model_config={
            "model_type": "dinov2",
            "num_hidden_layers": 2,
            "hidden_size": 16,
            "num_attention_heads": 1,
            "patch_size": 4,
        },
        patch_model_config={
            "model_type": "vit",
            "num_hidden_layers": 2,
            "hidden_size": 24,
            "num_attention_heads": 2,
            "patch_size": 6,
        },
        fov_model_config={
            "model_type": "vit",
            "num_hidden_layers": 2,
            "hidden_size": 32,
            "num_attention_heads": 4,
            "patch_size": 8,
        },
        num_labels=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.fusion_hidden_size = fusion_hidden_size
        # The list/dict defaults above are evaluated once and shared by every
        # instance; store copies so in-place mutation on one tester (or by a
        # caller) cannot corrupt every tester constructed afterwards.
        self.intermediate_hook_ids = list(intermediate_hook_ids)
        self.intermediate_feature_dims = list(intermediate_feature_dims)
        self.scaled_images_ratios = list(scaled_images_ratios)
        self.scaled_images_overlap_ratios = list(scaled_images_overlap_ratios)
        self.scaled_images_feature_dims = list(scaled_images_feature_dims)
        self.initializer_range = initializer_range
        self.use_fov_model = use_fov_model
        self.image_model_config = dict(image_model_config)
        self.patch_model_config = dict(patch_model_config)
        self.fov_model_config = dict(fov_model_config)
        self.num_labels = num_labels

        self.hidden_size = self.image_model_config["hidden_size"]
        self.num_hidden_layers = self.image_model_config["num_hidden_layers"]
        self.num_attention_heads = self.image_model_config["num_attention_heads"]

        # may be different for a backbone other than dinov2
        self.out_size = patch_size // self.image_model_config["patch_size"]
        self.seq_length = self.out_size**2 + 1  # we add 1 for the [CLS] token

        n_fusion_blocks = len(self.intermediate_hook_ids) + len(self.scaled_images_ratios)
        self.expected_depth_size = 2 ** (n_fusion_blocks + 1) * self.out_size

    def prepare_config_and_inputs(self):
        """Create the config plus random `pixel_values` (and labels when `use_labels`)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Return a `DepthProConfig` assembled from the tester's settings."""
        return DepthProConfig(
            patch_size=self.patch_size,
            fusion_hidden_size=self.fusion_hidden_size,
            intermediate_hook_ids=self.intermediate_hook_ids,
            intermediate_feature_dims=self.intermediate_feature_dims,
            scaled_images_ratios=self.scaled_images_ratios,
            scaled_images_overlap_ratios=self.scaled_images_overlap_ratios,
            scaled_images_feature_dims=self.scaled_images_feature_dims,
            initializer_range=self.initializer_range,
            image_model_config=self.image_model_config,
            patch_model_config=self.patch_model_config,
            fov_model_config=self.fov_model_config,
            use_fov_model=self.use_fov_model,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = DepthProModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        """Check the depth-estimation head's output shape."""
        config.num_labels = self.num_labels
        model = DepthProForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.predicted_depth.shape, (self.batch_size, self.expected_depth_size, self.expected_depth_size)
        )

    def create_and_check_for_fov(self, config, pixel_values, labels):
        """Check the field-of-view head: presence, shape, and batched-vs-single consistency."""
        model = DepthProForDepthEstimation(config, use_fov_model=True)
        model.to(torch_device)
        model.eval()

        # check if the fov_model (DinoV2-based encoder) is created
        self.parent.assertIsNotNone(model.fov_model)

        batched_pixel_values = pixel_values
        row_pixel_values = pixel_values[:1]

        with torch.no_grad():
            model_batched_output_fov = model(batched_pixel_values).field_of_view
            model_row_output_fov = model(row_pixel_values).field_of_view

        # check if fov is returned
        self.parent.assertIsNotNone(model_batched_output_fov)
        self.parent.assertIsNotNone(model_row_output_fov)

        # check output shape consistency for fov
        self.parent.assertEqual(model_batched_output_fov.shape, (self.batch_size,))

        # check equivalence between batched and single row outputs for fov
        diff = torch.max(torch.abs(model_row_output_fov - model_batched_output_fov[:1]))
        model_name = model.__class__.__name__
        self.parent.assertTrue(
            diff <= 1e-03,
            msg=(f"Batched and Single row outputs are not equal in {model_name} for fov. Difference={diff}."),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return `(config, inputs_dict)` in the shape `ModelTesterMixin` expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DepthProModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DepthPro does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (DepthProModel, DepthProForDepthEstimation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DepthProForDepthEstimation,
            "image-feature-extraction": DepthProModel,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = DepthProModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DepthProConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """Run the shared config round-trip tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Inductor error: name 'OpaqueUnaryFn_log2' is not defined")
    @pytest.mark.torch_compile_test
    def test_sdpa_can_compile_dynamic(self):
        pass

    @unittest.skip(reason="DepthPro does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        """The patch-embedding module must be exposed; no output embeddings expected."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_fov(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_fov(*config_and_inputs)

    def test_training(self):
        """Forward+backward must work in train mode (depth-estimation head excluded)."""
        for model_class in self.all_model_classes:
            # NOTE(review): the depth-estimation head is skipped here — presumably
            # its loss needs labels this common path does not provide; confirm.
            if model_class.__name__ == "DepthProForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        """Same as `test_training`, but with gradient checkpointing enabled."""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DepthProForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_initialization(self):
        """With initializer_range zeroed, parameter means must match the expected init scheme."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # parameters under these prefixes use truncated-normal init;
                # everything else is Conv2d / ConvTranspose2d with kaiming init
                non_uniform_init_parms = [
                    # these encoders are vision transformers
                    # any layer outside these encoders is either Conv2d or ConvTranspose2d
                    # which use kaiming initialization
                    "patch_encoder",
                    "image_encoder",
                    "fov_model.encoder",
                ]
                if param.requires_grad:
                    if any(x in name for x in non_uniform_init_parms):
                        # See PR #38607 (to avoid flakiness)
                        data = torch.flatten(param.data)
                        n_elements = torch.numel(data)
                        # skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in
                        # https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332
                        n_elements_to_skip_on_each_side = int(n_elements * 0.025)
                        data_to_check = torch.sort(data).values
                        if n_elements_to_skip_on_each_side > 0:
                            data_to_check = data_to_check[
                                n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side
                            ]
                        self.assertIn(
                            ((data_to_check.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # this started when switched from normal initialization to kaiming_normal initialization
    # maybe because the magnitude of offset values from ViT-encoders increases when followed by many convolution layers
    def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
        """Looser tolerances than the common test; see the note above."""
        super().test_batching_equivalence(atol=atol, rtol=rtol)

    @slow
    def test_model_from_pretrained(self):
        """The released checkpoint must load without error."""
        model_path = "apple/DepthPro-hf"
        model = DepthProModel.from_pretrained(model_path)
        self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Return the local COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
@slow
class DepthProModelIntegrationTest(unittest.TestCase):
    """Slow inference tests against the released `apple/DepthPro-hf` checkpoint."""

    def test_inference_depth_estimation(self):
        """Forward pass on the cats image; checks depth-map shape/values and the FOV head."""
        model_path = "apple/DepthPro-hf"
        image_processor = DepthProImageProcessor.from_pretrained(model_path)
        model = DepthProForDepthEstimation.from_pretrained(model_path).to(torch_device)
        config = model.config

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the predicted depth: size follows the fusion-block count, mirroring
        # the formula used by DepthProModelTester
        n_fusion_blocks = len(config.intermediate_hook_ids) + len(config.scaled_images_ratios)
        out_size = config.image_model_config.image_size // config.image_model_config.patch_size
        expected_depth_size = 2 ** (n_fusion_blocks + 1) * out_size
        expected_shape = torch.Size((1, expected_depth_size, expected_depth_size))
        self.assertEqual(outputs.predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[1.0582, 1.1225, 1.1335], [1.1154, 1.1398, 1.1486], [1.1434, 1.1500, 1.1643]]
        ).to(torch_device)
        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4, rtol=1e-4)

        # verify the predicted fov
        expected_shape = torch.Size((1,))
        self.assertEqual(outputs.field_of_view.shape, expected_shape)
        expected_slice = torch.tensor([47.2459]).to(torch_device)
        torch.testing.assert_close(outputs.field_of_view, expected_slice, atol=1e-4, rtol=1e-4)

    def test_post_processing_depth_estimation(self):
        """`post_process_depth_estimation` must resize the depth map back to the input size."""
        model_path = "apple/DepthPro-hf"
        image_processor = DepthProImageProcessor.from_pretrained(model_path)
        model = DepthProForDepthEstimation.from_pretrained(model_path)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs = image_processor.post_process_depth_estimation(
            outputs,
            target_sizes=[[image.height, image.width]],
        )
        predicted_depth = outputs[0]["predicted_depth"]
        expected_shape = torch.Size((image.height, image.width))
        self.assertTrue(predicted_depth.shape == expected_shape)
| transformers/tests/models/depth_pro/test_modeling_depth_pro.py/0 | {
"file_path": "transformers/tests/models/depth_pro/test_modeling_depth_pro.py",
"repo_id": "transformers",
"token_count": 7562
} | 558 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Dinov2WithRegisters model."""
import unittest
from transformers import Dinov2WithRegistersConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
Dinov2WithRegistersBackbone,
Dinov2WithRegistersForImageClassification,
Dinov2WithRegistersModel,
)
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Dinov2WithRegistersModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_register_tokens=2,
mask_ratio=0.5,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_register_tokens = num_register_tokens
self.scope = scope
# in DINOv2 with Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1 + self.num_register_tokens
self.mask_ratio = mask_ratio
self.num_masks = int(mask_ratio * self.seq_length)
self.mask_length = num_patches
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return Dinov2WithRegistersConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
num_register_tokens=self.num_register_tokens,
)
def create_and_check_model(self, config, pixel_values, labels):
model = Dinov2WithRegistersModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_backbone(self, config, pixel_values, labels):
model = Dinov2WithRegistersBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
expected_size = self.image_size // config.patch_size
self.parent.assertListEqual(
list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
)
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
# verify backbone works with out_features=None
config.out_features = None
model = Dinov2WithRegistersBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(
list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size]
)
# verify channels
self.parent.assertEqual(len(model.channels), 1)
# verify backbone works with apply_layernorm=False and reshape_hidden_states=False
config.apply_layernorm = False
config.reshape_hidden_states = False
model = Dinov2WithRegistersBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(
list(result.feature_maps[0].shape), [self.batch_size, self.seq_length, self.hidden_size]
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
    """Check the classification head output shape, for RGB and greyscale inputs."""
    config.num_labels = self.type_sequence_label_size
    model = Dinov2WithRegistersForImageClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    # test greyscale images (single channel)
    config.num_channels = 1
    model = Dinov2WithRegistersForImageClassification(config)
    model.to(torch_device)
    model.eval()
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
    result = model(pixel_values)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the shape the common ModelTesterMixin tests expect."""
    config, pixel_values, _labels = self.prepare_config_and_inputs()
    return config, {"pixel_values": pixel_values}
@require_torch
class Dinov2WithRegistersModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Dinov2WithRegisters does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (
            Dinov2WithRegistersModel,
            Dinov2WithRegistersForImageClassification,
            Dinov2WithRegistersBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "image-feature-extraction": Dinov2WithRegistersModel,
            "image-classification": Dinov2WithRegistersForImageClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = Dinov2WithRegistersModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=Dinov2WithRegistersConfig, has_text_modality=False, hidden_size=37
        )

    def test_initialization(self):
        """Check that (non-register-token) parameters are zero- or one-initialized under zeroed init ranges."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad and "register_tokens" not in name:
                    # See PR #38607 (to avoid flakiness)
                    data = torch.flatten(param.data)
                    n_elements = torch.numel(data)
                    # skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in
                    # https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332
                    n_elements_to_skip_on_each_side = int(n_elements * 0.025)
                    data_to_check = torch.sort(data).values
                    if n_elements_to_skip_on_each_side > 0:
                        data_to_check = data_to_check[n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side]
                    self.assertIn(
                        ((data_to_check.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Dinov2WithRegisters does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    def test_model_get_set_embeddings(self):
        """Input embeddings must be an nn.Module; output embeddings are absent or a Linear head."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="Dinov2WithRegisters does not support feedforward chunking yet")
    def test_feed_forward_chunking(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/dinov2-with-registers-base"
        model = Dinov2WithRegistersModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class Dinov2WithRegistersModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained facebook/dinov2-with-registers-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_no_head(self):
        """Run the base model on a fixture image and check output shape plus a logits slice."""
        model = Dinov2WithRegistersModel.from_pretrained("facebook/dinov2-with-registers-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the last hidden states
        # in DINOv2 with Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token)
        num_patches = (image_processor.crop_size["height"] // model.config.patch_size) ** 2
        expected_seq_length = num_patches + 1 + model.config.num_register_tokens
        expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_size))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        # reference slice recorded from a known-good run of this checkpoint
        expected_slice = torch.tensor(
            [[-0.4636, -1.4582, -0.0274], [-1.4738, -0.8858, 0.3002], [0.0714, -0.2407, -1.5940]],
            device=torch_device,
        )
        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
class Dinov2WithRegistersBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Runs the common backbone API tests (via BackboneTesterMixin) for Dinov2WithRegisters."""

    all_model_classes = (Dinov2WithRegistersBackbone,) if is_torch_available() else ()
    config_class = Dinov2WithRegistersConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = Dinov2WithRegistersModelTester(self)
| transformers/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py/0 | {
"file_path": "transformers/tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py",
"repo_id": "transformers",
"token_count": 6228
} | 559 |
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import DonutImageProcessor, DonutProcessor, XLMRobertaTokenizerFast
from ...test_processing_common import ProcessorTesterMixin
class DonutProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for Donut, including `token2json` structured-output parsing."""

    from_pretrained_id = "naver-clova-ix/donut-base"
    processor_class = DonutProcessor

    @classmethod
    def setUpClass(cls):
        # Processor used directly by the tests below (downloaded from the Hub).
        cls.processor = DonutProcessor.from_pretrained(cls.from_pretrained_id)
        # A freshly assembled processor saved to a temp dir for the common mixin tests.
        cls.tmpdirname = tempfile.mkdtemp()
        image_processor = DonutImageProcessor()
        tokenizer = XLMRobertaTokenizerFast.from_pretrained(cls.from_pretrained_id)
        processor = DonutProcessor(image_processor, tokenizer)
        processor.save_pretrained(cls.tmpdirname)

    @classmethod
    def tearDownClass(cls):
        # Fix: the temp dir created in setUpClass was never removed, leaking one
        # directory per test run (the Florence2 processor test cleans up the same way).
        import shutil

        shutil.rmtree(cls.tmpdirname, ignore_errors=True)
        super().tearDownClass()

    def test_token2json(self):
        """`token2json` should turn the tag-delimited sequence into nested JSON,
        including repeated groups, embedded newlines, and empty fields."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
            "multiline": "text\nwith\nnewlines",
            "empty": "",
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
            "<s_multiline>text\nwith\nnewlines</s_multiline>"
            "<s_empty></s_empty>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| transformers/tests/models/donut/test_processing_donut.py/0 | {
"file_path": "transformers/tests/models/donut/test_processing_donut.py",
"repo_id": "transformers",
"token_count": 930
} | 560 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch EfficientNet model."""
import unittest
from transformers import EfficientNetConfig
from transformers.testing_utils import is_pipeline_test, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EfficientNetForImageClassification, EfficientNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class EfficientNetModelTester:
    """Builds a tiny EfficientNet config plus dummy pixel inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        kernel_sizes=None,
        in_channels=None,
        out_channels=None,
        strides=None,
        num_block_repeats=None,
        expand_ratios=None,
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        # Fix: the original signature used mutable list defaults ([3, 3, 5], ...),
        # which are shared across all instances; use None sentinels instead.
        self.kernel_sizes = [3, 3, 5] if kernel_sizes is None else kernel_sizes
        self.in_channels = [32, 16, 24] if in_channels is None else in_channels
        self.out_channels = [16, 24, 20] if out_channels is None else out_channels
        self.strides = [1, 1, 2] if strides is None else strides
        self.num_block_repeats = [1, 1, 2] if num_block_repeats is None else num_block_repeats
        self.expand_ratios = [1, 6, 6] if expand_ratios is None else expand_ratios
        self.is_training = is_training
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.use_labels = use_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientNetConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_sizes=self.kernel_sizes,
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            strides=self.strides,
            num_block_repeats=self.num_block_repeats,
            expand_ratios=self.expand_ratios,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = EfficientNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 4, W // 4
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = EfficientNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class EfficientNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as EfficientNet does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"image-feature-extraction": EfficientNetModel, "image-classification": EfficientNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = EfficientNetModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=EfficientNetConfig,
            has_text_modality=False,
            hidden_size=37,
            common_properties=["num_channels", "image_size", "hidden_dim"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientNet does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="EfficientNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        """Check hidden-state count and spatial shape, both via kwarg and via config flag."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            num_blocks = sum(config.num_block_repeats) * 4
            self.assertEqual(len(hidden_states), num_blocks)

            # EfficientNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "google/efficientnet-b7"
        model = EfficientNetModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @is_pipeline_test
    @require_vision
    @slow
    def test_pipeline_image_feature_extraction(self):
        super().test_pipeline_image_feature_extraction()

    @is_pipeline_test
    @require_vision
    @slow
    def test_pipeline_image_feature_extraction_fp16(self):
        super().test_pipeline_image_feature_extraction_fp16()

    @is_pipeline_test
    @require_vision
    @slow
    def test_pipeline_image_classification(self):
        super().test_pipeline_image_classification()
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class EfficientNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained google/efficientnet-b7 checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("google/efficientnet-b7") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Run the classification model on a fixture image and check a logits slice."""
        model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # reference values recorded from a known-good run of this checkpoint
        expected_slice = torch.tensor([-0.2962, 0.4487, 0.4499]).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
| transformers/tests/models/efficientnet/test_modeling_efficientnet.py/0 | {
"file_path": "transformers/tests/models/efficientnet/test_modeling_efficientnet.py",
"repo_id": "transformers",
"token_count": 3901
} | 561 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
from transformers import AutoProcessor, BartTokenizerFast, Florence2Processor
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available():
import torch
if is_vision_available():
from transformers import CLIPImageProcessor
@require_torch
@require_vision
class Florence2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Tests for Florence2Processor: prompt construction, coordinate quantization, and
    the per-task post-processing of generated <loc_*> token sequences."""

    processor_class = Florence2Processor

    @classmethod
    def setUpClass(cls):
        cls.tmpdirname = tempfile.mkdtemp()
        image_processor = CLIPImageProcessor.from_pretrained("ducviet00/Florence-2-base-hf")
        image_processor.image_seq_length = 0
        tokenizer = BartTokenizerFast.from_pretrained("ducviet00/Florence-2-base-hf")
        tokenizer.image_token = "<image>"
        tokenizer.image_token_id = tokenizer.encode(tokenizer.image_token, add_special_tokens=False)[0]
        tokenizer.extra_special_tokens = {"image_token": "<image>"}
        processor_kwargs = cls.prepare_processor_dict()
        processor = Florence2Processor(image_processor, tokenizer, **processor_kwargs)
        processor.save_pretrained(cls.tmpdirname)
        cls.image_token = processor.image_token

    @staticmethod
    def prepare_processor_dict():
        # Post-processor configuration covering every supported task type.
        return {
            "post_processor_config": {
                "ocr": {
                    "pattern": r"(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>",
                    "area_threshold": 0.0,
                },
                "phrase_grounding": {"banned_grounding_tokens": ["the image"]},
                "pure_text": {},
                "description_with_bboxes": {},
                "description_with_polygons": {},
                "polygons": {},
                "bboxes": {},
                "description_with_bboxes_or_polygons": {},
            }
        }

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_construct_prompts(self):
        """Task tokens must expand to their canonical prompts; plain text passes through."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        # Test single text without task token
        text = "This is a simple text."
        prompts = processor._construct_prompts(text)
        self.assertEqual(prompts, [text])

        # Test list of texts with task without input
        texts = ["<OCR>", "<CAPTION>"]
        prompts = processor._construct_prompts(texts)
        EXPECTED_PROMPTS_WITHOUT_INPUT = ["What is the text in the image?", "What does the image describe?"]
        self.assertEqual(prompts, EXPECTED_PROMPTS_WITHOUT_INPUT)

        # Test task with input
        texts = ["<CAPTION_TO_PHRASE_GROUNDING> a red car"]
        prompts = processor._construct_prompts(texts)
        EXPECTED_PROMPTS_WITH_INPUT = ["Locate the phrases in the caption: a red car"]
        self.assertEqual(prompts, EXPECTED_PROMPTS_WITH_INPUT)

        # Test invalid prompt with task token not alone
        with self.assertRaises(ValueError):
            processor._construct_prompts("<OCR> extra text")

    def test_quantizer_quantize_dequantize(self):
        """Quantize→dequantize round-trips boxes/points, clamping to the image bounds."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        # Test bounding box quantization and dequantization
        boxes = torch.tensor([[0, 0, 30, 40], [500, 550, 600, 690], [750, 1121, 851, 1239]], dtype=torch.int32)
        size = (800, 1200)
        quantized_boxes = processor.post_processor.quantize(boxes, size)
        dequantized_boxes = processor.post_processor.dequantize(quantized_boxes, size)
        # out-of-range coordinates are clamped to width-1 / height-1
        EXPECTED_DEQUANTIZED_BBOX = torch.tensor(
            [[0, 0, 30, 40], [500, 550, 600, 690], [750, 1121, 799, 1199]], dtype=torch.int32
        )
        self.assertTrue(torch.allclose(dequantized_boxes, EXPECTED_DEQUANTIZED_BBOX))

        # Test points quantization and dequantization
        points = torch.tensor([[0, 0], [300, 400], [850, 1250]], dtype=torch.int32)
        quantized_points = processor.post_processor.quantize(points, size)
        dequantized_points = processor.post_processor.dequantize(quantized_points, size)
        EXPECTED_DEQUANTIZED_POINTS = torch.tensor([[0, 0], [300, 400], [799, 1199]], dtype=torch.int32)
        self.assertTrue(torch.allclose(dequantized_points, EXPECTED_DEQUANTIZED_POINTS))

        # Test invalid shape
        with self.assertRaises(ValueError):
            processor.post_processor.quantize(torch.tensor([[1, 2, 3]]), size)

    def test_post_process_parse_description_with_bboxes_from_text_and_spans(self):
        """Parsing <loc_*> quadruples into bboxes, with and without phrase labels."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        text_without_phrase = "</s><s><loc_53><loc_334><loc_933><loc_775><loc_711><loc_203><loc_906><loc_546><loc_585><loc_309><loc_774><loc_709><loc_577></s><pad>"
        image_size = (1000, 1000)
        parsed_text_without_phrase = processor.post_processor.parse_description_with_bboxes_from_text_and_spans(
            text_without_phrase, image_size=image_size, allow_empty_phrase=True
        )
        # the trailing lone <loc_577> does not form a full box and is dropped
        EXPECTED_PARSED_TEXT_WITHOUT_PHRASE = [
            {"bbox": [53, 334, 933, 775], "cat_name": ""},
            {"bbox": [711, 203, 906, 546], "cat_name": ""},
            {"bbox": [585, 309, 774, 709], "cat_name": ""},
        ]
        self.assertEqual(parsed_text_without_phrase, EXPECTED_PARSED_TEXT_WITHOUT_PHRASE)

        text_with_phrase = (
            "</s><s>car<loc_53><loc_334><loc_933><loc_775>door handle<loc_425><loc_504><loc_474><loc_516></s><pad>"
        )
        image_size = (1000, 1000)
        parsed_text_with_phrase = processor.post_processor.parse_description_with_bboxes_from_text_and_spans(
            text_with_phrase, image_size=image_size, allow_empty_phrase=False
        )
        EXPECTED_PARSED_TEXT_WITH_PHRASE = [
            {"bbox": [53, 334, 933, 775], "cat_name": "car"},
            {"bbox": [425, 504, 474, 516], "cat_name": "door handle"},
        ]
        self.assertEqual(parsed_text_with_phrase, EXPECTED_PARSED_TEXT_WITH_PHRASE)

    def test_post_process_parse_description_with_polygons_from_text_and_spans(self):
        """Parsing <loc_*> coordinate runs into polygons, with and without phrase labels."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        text_without_phrase = "<loc_279><loc_379><loc_282><loc_379><loc_290><loc_373><loc_293><loc_373><loc_298><loc_369><loc_301><loc_369>"
        image_size = (1000, 1000)
        parsed_text_without_phrase = processor.post_processor.parse_description_with_polygons_from_text_and_spans(
            text_without_phrase, image_size=image_size, allow_empty_phrase=True
        )
        EXPECTED_PARSED_TEXT_WITHOUT_PHRASE = [
            {
                "cat_name": "",
                "polygons": [[279, 379, 282, 379, 290, 373, 293, 373, 298, 369, 301, 369]],
            }
        ]
        self.assertEqual(parsed_text_without_phrase, EXPECTED_PARSED_TEXT_WITHOUT_PHRASE)

        text_with_phrase = (
            "Hello<loc_769><loc_248><loc_771><loc_234><loc_773><loc_206><loc_773><loc_198><loc_771><loc_193>"
        )
        image_size = (1000, 1000)
        parsed_text_with_phrase = processor.post_processor.parse_description_with_polygons_from_text_and_spans(
            text_with_phrase, image_size=image_size, allow_empty_phrase=False
        )
        EXPECTED_PARSED_TEXT_WITH_PHRASE = [
            {
                "cat_name": "Hello",
                "polygons": [[769, 248, 771, 234, 773, 206, 773, 198, 771, 193]],
            }
        ]
        self.assertEqual(parsed_text_with_phrase, EXPECTED_PARSED_TEXT_WITH_PHRASE)

    def test_post_process_parse_ocr_from_text_and_spans(self):
        """OCR parsing extracts (text, quad_box) pairs; tiny regions fall below area_threshold."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        text = "</s><s>Hello<loc_100><loc_100><loc_200><loc_100><loc_200><loc_200><loc_100><loc_200>World<loc_300><loc_300><loc_400><loc_300><loc_400><loc_400><loc_300><loc_400></s>"
        image_size = (1000, 1000)
        parsed = processor.post_processor.parse_ocr_from_text_and_spans(
            text, pattern=None, image_size=image_size, area_threshold=0.0
        )
        EXPECTED_PARSED_OCR = [
            {"quad_box": [100, 100, 200, 100, 200, 200, 100, 200], "text": "Hello"},
            {"quad_box": [300, 300, 400, 300, 400, 400, 300, 400], "text": "World"},
        ]
        self.assertEqual(parsed, EXPECTED_PARSED_OCR)

        # Test with area threshold filtering
        small_text = "Small<loc_1><loc_1><loc_2><loc_2><loc_2><loc_2><loc_1><loc_1>"
        parsed_small = processor.post_processor.parse_ocr_from_text_and_spans(
            small_text, pattern=None, image_size=image_size, area_threshold=0.01
        )
        EXPECTED_PARSED_OCR_SMALL = []
        self.assertEqual(parsed_small, EXPECTED_PARSED_OCR_SMALL)

    def test_post_process_parse_phrase_grounding_from_text_and_spans(self):
        """Phrase grounding groups all boxes under their phrase; banned phrases are dropped."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        text = "</s><s>red car<loc_53><loc_334><loc_933><loc_775><loc_711><loc_203><loc_906><loc_546>sky<loc_0><loc_0><loc_1000><loc_300></s>"
        image_size = (1000, 1000)
        parsed = processor.post_processor.parse_phrase_grounding_from_text_and_spans(text, image_size=image_size)
        EXPECTED_PARSED_PHRASE_GROUNDING = [
            {"bbox": [[53, 334, 933, 775], [711, 203, 906, 546]], "cat_name": "red car"},
            {"bbox": [[0, 0, 1000, 300]], "cat_name": "sky"},
        ]
        self.assertEqual(parsed, EXPECTED_PARSED_PHRASE_GROUNDING)

        # Test with blacklisted phrase ("the image" is in banned_grounding_tokens)
        blacklisted_text = "the image<loc_100><loc_100><loc_200><loc_200>"
        parsed_blacklisted = processor.post_processor.parse_phrase_grounding_from_text_and_spans(
            blacklisted_text, image_size=image_size
        )
        EXPECTED_PARSED_BLACKLISTED = []
        self.assertEqual(parsed_blacklisted, EXPECTED_PARSED_BLACKLISTED)

    def test_post_process_generation(self):
        """End-to-end post-processing dispatches on the task token."""
        processor = self.processor_class.from_pretrained(self.tmpdirname)

        # Test pure_text task
        text = "<s>Hello world</s>"
        cap_result = processor.post_process_generation(text=text, task="<CAPTION>", image_size=None)
        EXPECTED_PURE_TEXT_RESULT = {"<CAPTION>": "Hello world"}
        self.assertEqual(cap_result, EXPECTED_PURE_TEXT_RESULT)

        # Test description_with_bboxes task
        text = "car<loc_53><loc_334><loc_933><loc_775>"
        od_result = processor.post_process_generation(text=text, task="<OD>", image_size=(1000, 1000))
        EXPECTED_BBOXES_RESULT = {"<OD>": {"bboxes": [[53, 334, 933, 775]], "labels": ["car"]}}
        self.assertEqual(od_result, EXPECTED_BBOXES_RESULT)

        # Test OCR task
        text = "Hello<loc_100><loc_100><loc_200><loc_100><loc_200><loc_200><loc_100><loc_200>"
        ocr_result = processor.post_process_generation(text=text, task="<OCR_WITH_REGION>", image_size=(1000, 1000))
        EXPECTED_OCR_RESULT = {
            "<OCR_WITH_REGION>": {"quad_boxes": [[100, 100, 200, 100, 200, 200, 100, 200]], "labels": ["Hello"]}
        }
        self.assertEqual(ocr_result, EXPECTED_OCR_RESULT)
| transformers/tests/models/florence2/test_processing_florence2.py/0 | {
"file_path": "transformers/tests/models/florence2/test_processing_florence2.py",
"repo_id": "transformers",
"token_count": 5334
} | 562 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GptOss model."""
import difflib
import inspect
import json
import os
import subprocess
import tempfile
import unittest
from pathlib import Path
import pytest
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
GptOssConfig,
is_torch_available,
)
from transformers.testing_utils import (
cleanup,
require_read_token,
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_configuration_common import ConfigTester
if is_torch_available():
import torch
from transformers import (
GptOssForCausalLM,
GptOssForSequenceClassification,
GptOssForTokenClassification,
GptOssModel,
)
NUM_GPUS = torch.cuda.device_count()
class GptOssModelTester(CausalLMModelTester):
    """Shared CausalLM tester wiring for GPT-OSS (config and model classes under test)."""

    if is_torch_available():
        config_class = GptOssConfig
        base_model_class = GptOssModel
        causal_lm_class = GptOssForCausalLM
        sequence_class = GptOssForSequenceClassification
        token_class = GptOssForTokenClassification

    pipeline_model_mapping = (
        {
            "feature-extraction": GptOssModel,
            "text-classification": GptOssForSequenceClassification,
            "text-generation": GptOssForCausalLM,
            "token-classification": GptOssForTokenClassification,
        }
        if is_torch_available()
        else {}
    )
@require_torch
class GptOssModelTest(CausalLMModelTest, unittest.TestCase):
    """Common CausalLM tests for GPT-OSS; many inherited tests are skipped because the
    model uses a HybridCache and attention-sink behavior incompatible with them."""

    all_model_classes = (
        (GptOssModel, GptOssForCausalLM, GptOssForSequenceClassification, GptOssForTokenClassification)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": GptOssModel,
            "text-classification": GptOssForSequenceClassification,
            "text-generation": GptOssForCausalLM,
            "token-classification": GptOssForTokenClassification,
        }
        if is_torch_available()
        else {}
    )

    test_headmasking = False
    test_pruning = False
    _is_stateful = True
    model_split_percents = [0.5, 0.6]
    model_tester_class = GptOssModelTester

    def setUp(self):
        self.model_tester = GptOssModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GptOssConfig, hidden_size=37)

    @unittest.skip("Failing because of unique cache (HybridCache)")
    def test_model_outputs_equivalence(self, **kwargs):
        pass

    @unittest.skip("GptOss's forcefully disables sdpa due to Sink")
    def test_sdpa_can_dispatch_non_composite_models(self):
        pass

    @unittest.skip("GptOss's eager attn/sdpa attn outputs are expected to be different")
    def test_eager_matches_sdpa_generate(self):
        pass

    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    @unittest.skip("GptOss has HybridCache which is not compatible with assisted decoding")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @unittest.skip("GptOss has HybridCache which is not compatible with assisted decoding")
    def test_prompt_lookup_decoding_matches_greedy_search(self, assistant_type):
        pass

    @pytest.mark.generate
    @unittest.skip("GptOss has HybridCache which is not compatible with assisted decoding")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip("GptOss has HybridCache which is not compatible with dola decoding")
    def test_dola_decoding_sample(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support continue from past kv")
    def test_generate_continue_from_past_key_values(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate_dict_outputs_use_cache(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support contrastive generation")
    def test_contrastive_generate_low_memory(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
    def test_generate_with_static_cache(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip("GptOss has HybridCache and doesn't support StaticCache. Though it could, it shouldn't support.")
    def test_generate_continue_from_inputs_embeds(self):
        pass

    @unittest.skip(
        reason="HybridCache can't be gathered because it is not iterable. Adding a simple iter and dumping `distributed_iterator`"
        " as in Dynamic Cache doesn't work. NOTE: @gante all cache objects would need better compatibility with multi gpu setting"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("GptOss has HybridCache which auto-compiles. Compile and FA2 don't work together.")
    def test_eager_matches_fa2_generate(self):
        pass

    @unittest.skip("GptOss eager/FA2 attention outputs are expected to be different")
    def test_flash_attn_2_equivalence(self):
        pass

    @unittest.skip("Most probably because of the MOE, the moe and router does not ignore padding tokens")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("GptOss does not support flex officially")
    def test_flex_attention_with_grads(self):
        pass
# JSON fixture holding the expected generations for the integration tests,
# keyed by the configuration strings built by `generate_config_key`.
RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/gpt_oss/integration_tests.json"
# ------------------------
# Worker function for distributed torchrun
# ------------------------
def distributed_worker(quantized, model_size, kernels, attn_impl, mode):
    """Torchrun worker: run distributed greedy generation and validate outputs.

    This function is serialized with `inspect.getsource` by
    `GptOssIntegrationTest.run_distributed_test` and executed in a fresh
    interpreter, so it must stay self-contained: every module it needs (beyond
    `json`, which the generated script imports, and the injected `RESULTS_PATH`
    string) has to be imported inside the function body.

    Args:
        quantized: "true"/"false" string; only used to build the results key.
        kernels: "true"/"false" string toggling `use_kernels`.
        model_size: checkpoint suffix, e.g. "20b" or "120b".
        attn_impl: attention implementation name.
        mode: "eval" or "train"; only used to build the results key.
    """
    # FIX: `difflib` was previously not imported anywhere in the standalone
    # torchrun script, so an output mismatch raised NameError instead of the
    # intended unified diff. Keep the import local for self-containment.
    import difflib
    import os

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.testing_utils import torch_device

    def generate_config_key(quantized, model, kernels, attn_impl, mode):
        """Generate a key for the restructured integration test results."""
        return f"quantized={str(quantized).lower()}|model={model}|kernels={str(kernels).lower()}|attn_impl={attn_impl}|mode={mode}"

    input_text = [
        "Roses are red, violets",
        "How are you? Tell me the name of the president of",
    ]
    # Convert CLI string args to booleans
    quantized = quantized.lower() == "true"
    kernels = kernels.lower() == "true"

    # Distributed model loading across the torchrun workers
    model_id = f"openai/gpt-oss-{model_size}"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype="auto",
        tp_plan="auto",  # distributed inference
        use_kernels=kernels,
    ).to(torch_device)
    model.set_attn_implementation(attn_impl)
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")

    # Inference (greedy decoding, so outputs are deterministic and comparable)
    inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(torch_device)
    output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
    output_texts = tokenizer.batch_decode(output, skip_special_tokens=False)

    # Only rank 0 validates against the expected outputs
    if int(os.environ.get("RANK", "0")) == 0:
        # Generate key to look up expected outputs
        key = generate_config_key(quantized, model_size, kernels, attn_impl, mode)
        if os.path.exists(RESULTS_PATH):
            with open(RESULTS_PATH, "r") as f:
                expected_results = json.load(f)
            if key in expected_results:
                expected_outputs = expected_results[key]
                assert len(output_texts) == len(expected_outputs), f"Output length mismatch for {key}"
                for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)):
                    actual_stripped = actual.strip()
                    expected_stripped = expected.strip()
                    # Truncate both sides to the shorter length so small
                    # generation-length differences don't cause false failures.
                    min_length = min(len(actual_stripped), len(expected_stripped))
                    actual_truncated = actual_stripped[:min_length]
                    expected_truncated = expected_stripped[:min_length]
                    if actual_truncated != expected_truncated:
                        diff = "\n".join(
                            difflib.unified_diff(
                                expected_truncated.splitlines(keepends=True),
                                actual_truncated.splitlines(keepends=True),
                                fromfile=f"expected[{i}]",
                                tofile=f"actual[{i}]",
                                lineterm="",
                            )
                        )
                        raise AssertionError(
                            f"Output mismatch at index {i} for {key}:\n"
                            f"Expected: '{expected_stripped}'\n"
                            f"Actual: '{actual_stripped}'\n"
                            f"Diff (truncated to min length {min_length}):\n{diff}"
                        )
                print(f"✓ Outputs match expected results for {key}")
            else:
                print(f"Warning: No expected results found for configuration: {key}")
        else:
            print(f"Warning: Results file {RESULTS_PATH} not found")
@slow
@require_torch_accelerator
class GptOssIntegrationTest(unittest.TestCase):
    # Prompts shared by every integration test in this class.
    input_text = [
        "Roses are red, violets",
        "How are you? Tell me the name of the president of",
    ]
@staticmethod
def generate_config_key(quantized, model, kernels, attn_impl, mode):
    """Build the lookup key used by the integration-results JSON fixture."""
    fields = (
        f"quantized={str(quantized).lower()}",
        f"model={model}",
        f"kernels={str(kernels).lower()}",
        f"attn_impl={attn_impl}",
        f"mode={mode}",
    )
    return "|".join(fields)
def setUp(self):
    """Release accelerator memory before each test."""
    cleanup(torch_device, gc_collect=True)

def tearDown(self):
    """Release accelerator memory after each test."""
    cleanup(torch_device, gc_collect=True)
# ------------------------
# Non-distributed inference
# ------------------------
@staticmethod
def load_and_forward(model_id, attn_implementation, input_text, **pretrained_kwargs):
    """Load `model_id`, greedily generate 20 new tokens for every prompt in
    `input_text`, and return the decoded strings (special tokens stripped)."""
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype=torch.bfloat16,
        device_map="auto",
        attn_implementation=attn_implementation,
        **pretrained_kwargs,
    )
    batch = tokenizer(input_text, return_tensors="pt", padding=True).to(model.device)
    generated = model.generate(**batch, max_new_tokens=20, do_sample=False)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
# ------------------------
# Distributed inference using inspect
# ------------------------
@staticmethod
def run_distributed_test(quantized, model, kernels, attn_impl, mode):
    """Launch torchrun on a temporary worker script built from `distributed_worker`.

    The worker's source is extracted with `inspect.getsource` and written to a
    temporary file together with the injected `RESULTS_PATH`, then executed
    with `torchrun --nproc_per_node={NUM_GPUS}`.
    """
    import textwrap

    # Extract worker function source dynamically
    worker_src = inspect.getsource(distributed_worker)
    # NOTE: the template is intentionally flush-left. Once the column-0
    # `worker_src` is interpolated, `textwrap.dedent` is a no-op; indenting
    # the template would yield an invalid generated script.
    script_code = f"""
import sys
import json
RESULTS_PATH = "{RESULTS_PATH}"
{worker_src}
if __name__ == "__main__":
    distributed_worker("{quantized}", "{model}", "{kernels}", "{attn_impl}", "{mode}")
"""
    # Dedent for proper formatting
    script_code = textwrap.dedent(script_code)
    # Write to temp file
    with tempfile.NamedTemporaryFile("w", suffix="_worker.py", delete=False) as tmp:
        tmp.write(script_code)
        tmp_path = tmp.name
    try:
        # Launch torchrun
        cmd = [
            "torchrun",
            f"--nproc_per_node={NUM_GPUS}",
            tmp_path,
        ]
        subprocess.run(cmd, check=True)
    finally:
        # FIX: remove the temp file even when torchrun fails (`check=True`
        # raises CalledProcessError); previously the file leaked on failure.
        os.remove(tmp_path)
# ------------------------
# Shared parameterization
# ------------------------
# (quantized, model_size, use_kernels, attn_implementation, mode) for every
# combination exercised by the integration tests. Generated instead of
# hand-written so the axes cannot drift out of sync; the ordering reproduces
# the original hand-written table exactly (model size outermost, then
# quantized, kernels, attention implementation, and mode innermost).
PARAMETERS = [
    (quantized, model, kernels, attn_impl, mode)
    for model in ("20b", "120b")
    for quantized in (False, True)
    for kernels in (False, True)
    for attn_impl in ("eager", "kernels-community/vllm-flash-attn3")
    for mode in ("eval", "train")
]
# ------------------------
# Non-distributed test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_model_outputs(self, quantized, model, kernels, attn_impl, mode):
    """Generate with one (quantized, model, kernels, attn, mode) configuration
    and compare the decoded text against the stored expected outputs.

    NOTE(review): the comparison logic below is duplicated in
    `distributed_worker`; the two must be kept in sync manually.
    """
    model_id = f"openai/gpt-oss-{model}"
    output_texts = self.load_and_forward(
        model_id,
        attn_impl,
        self.input_text,
        use_kernels=kernels,
    )
    # Generate key to look up expected outputs
    key = self.generate_config_key(quantized, model, kernels, attn_impl, mode)
    # Load expected outputs from the results JSON fixture, when present
    if os.path.exists(RESULTS_PATH):
        with open(RESULTS_PATH, "r") as f:
            expected_results = json.load(f)
        # Check if we have expected results for this configuration
        if key in expected_results:
            expected_outputs = expected_results[key]
            # Compare actual outputs with expected outputs
            self.assertEqual(len(output_texts), len(expected_outputs), f"Output length mismatch for {key}")
            for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)):
                actual_stripped = actual.strip()
                expected_stripped = expected.strip()
                # Make lengths match by taking minimum length to be resilient to generation differences
                min_length = min(len(actual_stripped), len(expected_stripped))
                actual_truncated = actual_stripped[:min_length]
                expected_truncated = expected_stripped[:min_length]
                if actual_truncated != expected_truncated:
                    diff = "\n".join(
                        difflib.unified_diff(
                            expected_truncated.splitlines(keepends=True),
                            actual_truncated.splitlines(keepends=True),
                            fromfile=f"expected[{i}]",
                            tofile=f"actual[{i}]",
                            lineterm="",
                        )
                    )
                    self.fail(
                        f"Output mismatch at index {i} for {key}:\n"
                        f"Expected: '{expected_stripped}'\n"
                        f"Actual: '{actual_stripped}'\n"
                        f"Diff (truncated to min length {min_length}):\n{diff}"
                    )
        else:
            # If no expected results exist, this is a new configuration
            # We could optionally add it to the results file here
            print(f"Warning: No expected results found for configuration: {key}")
    # Minimal sanity checks that run even without stored expectations.
    self.assertIsInstance(output_texts, list)
    self.assertTrue(all(isinstance(x, str) for x in output_texts))
# ------------------------
# Distributed test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_model_outputs_distributed(self, quantized, model, kernels, attn_impl, mode):
    """Same check as `test_model_outputs`, but executed under torchrun via
    `run_distributed_test` / `distributed_worker`."""
    self.run_distributed_test(quantized, model, kernels, attn_impl, mode)
# ------------------------
# Training test
# ------------------------
@parameterized.expand(PARAMETERS)
@require_read_token
def test_training_step(self, quantized, model, kernels, attn_impl, mode):
    """Run one forward/backward pass and verify every trainable parameter gets
    a non-zero gradient. Only unquantized `train` configurations execute."""
    if mode != "train":
        self.skipTest("This test is only for training mode.")
    if quantized:
        self.skipTest("Training test for quantized models is not supported.")
    model_id = f"openai/gpt-oss-{model}"
    model_obj = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype=torch.bfloat16,
        device_map="auto",
        attn_implementation=attn_impl,
        use_kernels=kernels,
    )
    model_obj.train()
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(model_obj.device)
    # Standard causal-LM objective: labels are a copy of the inputs.
    inputs["labels"] = inputs["input_ids"].clone()
    outputs = model_obj(**inputs)
    loss = outputs.loss
    self.assertIsNotNone(loss)
    loss.backward()
    # Check that gradients were computed for all parameters that have a grad field
    for name, param in model_obj.named_parameters():
        if param.requires_grad:
            self.assertIsNotNone(param.grad, f"Parameter '{name}' did not receive a gradient.")
            # Check that gradients are not all zero
            self.assertTrue(
                torch.sum(torch.abs(param.grad)).item() > 0, f"Gradient for parameter '{name}' is all zeros."
            )
def test_model_matches_original_20b(self):
    """Greedily decode 12 tokens from the 20b checkpoint and compare each
    step's selected log-probability against reference values captured from the
    original implementation (loose 1e-1 tolerances absorb kernel differences).
    """
    input_text = "Roses are red, violets"
    original_output = "Roses are red, violets are blue, I love you, and I love you too."
    # Per-step logprobs of the greedily selected token from the reference run
    # (20 values recorded; only the first 12 are consumed below).
    original_logprobs = torch.tensor(
        [
            -0.037353515625,
            -0.08154296875,
            -1.21875,
            -1.953125,
            -2.234375,
            -0.96875,
            -1.546875,
            -1.640625,
            -0.93359375,
            -1.609375,
            -1.625,
            -0.85546875,
            -1.7265625,
            -0.7421875,
            -2.078125,
            -0.006561279296875,
            -0.10498046875,
            -0.1767578125,
            -0.1240234375,
            -0.099609375,
        ]
    )
    model_id = "openai/gpt-oss-20b"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype=torch.bfloat16,
        device_map="auto",
        attn_implementation="eager",
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokens = tokenizer(input_text)["input_ids"]
    # FIX: removed the dead `num_generated_tokens` counter (incremented but
    # never read).
    with torch.no_grad():
        for i in range(12):
            tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
            logits = model(tensors).logits[0]
            # Greedy choice and its logprob at the last position
            predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
            logprobs = torch.log_softmax(logits[-1, :], dim=-1)
            selected_logprobs = logprobs[predicted_token]
            tokens.append(predicted_token)
            decoded_token = tokenizer.decode([predicted_token])
            logprob_differences = selected_logprobs - original_logprobs[i]
            print(
                f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
            )
            torch.testing.assert_close(
                selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
            )
    decoded_string = tokenizer.decode(tokens)
    # Prompt + 12 generated tokens must be a prefix of the reference output.
    self.assertTrue(original_output.startswith(decoded_string))
def test_model_matches_original_120b(self):
    """Same check as `test_model_matches_original_20b`, for the 120b
    checkpoint: 12 greedy steps compared against reference logprobs."""
    input_text = "Roses are red, violets"
    original_output = """Roses are red, violets are blue,
I am a language model, not a human being"""
    # Per-step logprobs of the greedily selected token from the reference run
    # (20 values recorded; only the first 12 are consumed below).
    original_logprobs = torch.tensor(
        [
            -0.90234375,
            -0.66015625,
            -1.546875,
            -2.703125,
            -2.078125,
            -1.21875,
            -2.484375,
            -0.031982421875,
            -0.84765625,
            -1.890625,
            -0.1923828125,
            -2.046875,
            -1.65625,
            -1.3515625,
            -1.1640625,
            -0.3671875,
            -1.9921875,
            -1.5390625,
            -1.46875,
            -0.85546875,
        ]
    )
    model_id = "openai/gpt-oss-120b"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype=torch.bfloat16,
        device_map="auto",
        attn_implementation="eager",
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokens = tokenizer(input_text)["input_ids"]
    # FIX: removed the dead `num_generated_tokens` counter (incremented but
    # never read).
    with torch.no_grad():
        for i in range(12):
            tensors = torch.as_tensor(tokens, dtype=torch.int32, device=model.device).unsqueeze(0)
            logits = model(tensors).logits[0]
            # Greedy choice and its logprob at the last position
            predicted_token = torch.argmax(logits[-1, :], dim=-1).item()
            logprobs = torch.log_softmax(logits[-1, :], dim=-1)
            selected_logprobs = logprobs[predicted_token]
            tokens.append(predicted_token)
            decoded_token = tokenizer.decode([predicted_token])
            logprob_differences = selected_logprobs - original_logprobs[i]
            print(
                f"Generated token: {repr(decoded_token)}, logprob: {selected_logprobs}, logprob differences: {logprob_differences}"
            )
            torch.testing.assert_close(
                selected_logprobs.cpu().to(original_logprobs.dtype), original_logprobs[i], atol=1e-1, rtol=1e-1
            )
    decoded_string = tokenizer.decode(tokens)
    # Prompt + 12 generated tokens must be a prefix of the reference output.
    self.assertTrue(original_output.startswith(decoded_string))
| transformers/tests/models/gpt_oss/test_modeling_gpt_oss.py/0 | {
"file_path": "transformers/tests/models/gpt_oss/test_modeling_gpt_oss.py",
"repo_id": "transformers",
"token_count": 11486
} | 563 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Idefics3 model."""
import copy
import unittest
from io import BytesIO
import pytest
import requests
from transformers import (
AutoProcessor,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_bitsandbytes,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
Idefics3Config,
Idefics3ForConditionalGeneration,
Idefics3Model,
)
if is_vision_available():
from PIL import Image
class Idefics3VisionText2TextModelTester:
    """Builds a tiny Idefics3 configuration and random image/text inputs for the
    shared model tests (sizes kept deliberately small so tests stay fast)."""

    def __init__(
        self,
        parent,
        is_training=True,
        batch_size=2,
        scale_factor=2,
        num_images=2,
        # NOTE(review): mutable dict defaults are shared across instances; safe
        # only while nothing mutates them — confirm before refactoring.
        vision_config={
            "image_size": 16,
            "patch_size": 4,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 32,
            "dropout": 0.1,
            "attention_dropout": 0.1,
            "initializer_range": 0.02,
        },
        text_config={
            "vocab_size": 100,
            "hidden_size": 64,
            "intermediate_size": 56,
            "num_hidden_layers": 3,
            "num_attention_heads": 2,
            "num_key_value_heads": 2,
            "hidden_act": "silu",
            "max_position_embeddings": 256,
            "initializer_range": 0.02,
            "rms_norm_eps": 1e-6,
            "pad_token_id": 2,
            "bos_token_id": 0,
            "eos_token_id": 1,
            "image_token_id": 57,
            "tie_word_embeddings": False,
            "rope_theta": 10000.0,
            "sliding_window": 32,
            "attention_dropout": 0.0,
        },
        use_cache=False,
        tie_word_embeddings=False,
        image_token_id=57,
    ):
        self.parent = parent
        self.pad_token_id = text_config["pad_token_id"]
        self.is_training = is_training
        self.batch_size = batch_size
        self.num_images = num_images
        self.scale_factor = scale_factor
        # Image tokens per image = (patches per side)^2 / scale_factor^2,
        # times num_images; with the defaults: (16 // 4)**2 / 4 * 2 = 8.
        self.seq_length = (
            int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (self.scale_factor**2))
            * self.num_images
        )
        self.use_cache = use_cache
        self.image_token_id = image_token_id
        self.tie_word_embeddings = tie_word_embeddings
        # Hack - add properties here so use common tests
        self.vocab_size = text_config["vocab_size"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.hidden_size = text_config["hidden_size"]
        self.vision_config = vision_config
        self.text_config = text_config

    def get_config(self):
        """Assemble an `Idefics3Config` from the tester's settings."""
        return Idefics3Config(
            use_cache=self.use_cache,
            image_token_id=self.image_token_id,
            tie_word_embeddings=self.tie_word_embeddings,
            vision_config=self.vision_config,
            text_config=self.text_config,
            vocab_size=self.vocab_size,
            scale_factor=self.scale_factor,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with random pixels of shape
        (batch_size, num_images, 3, image_size, image_size)."""
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.num_images,
                3,  # Idefics3ImageProcessor always generates RGB pixel values
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()
        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict); the last `seq_length` positions of
        `input_ids` are set to the image token id."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        # ids in [1, vocab_size - 2] so the image token can be placed safely
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
        # For simplicity just set the last n tokens to the image token
        n_image_tokens_per_batch = self.seq_length
        # Replace any accidental image tokens elsewhere with padding first.
        input_ids[input_ids == self.image_token_id] = self.pad_token_id
        input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
        # NOTE(review): the mask zeroes positions whose *token id* equals 1,
        # while pad_token_id is 2 — looks inherited from another tester;
        # confirm it is intentional before changing.
        attention_mask = input_ids.ne(1).to(torch_device)
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class Idefics3ModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Model tester for `Idefics3`.
    """

    all_model_classes = (Idefics3Model,) if is_torch_available() else ()
    # Common-test toggles for this architecture.
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
def setUp(self):
    """Create the Idefics3 model tester and its config tester (the config has
    no standalone text modality; `image_token_id` is the shared property)."""
    self.model_tester = Idefics3VisionText2TextModelTester(self)
    self.config_tester = ConfigTester(
        self, config_class=Idefics3Config, has_text_modality=False, common_properties=["image_token_id"]
    )

def test_config(self):
    """Run the shared config sanity checks."""
    self.config_tester.run_common_tests()
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds(self):
    # FIX: this override was defined without `self`. Harmless while skipped
    # (the unittest.skip wrapper never calls the original body) but wrong as
    # an instance method; now matches the sibling overrides' signatures.
    pass
# ---- Skipped common tests, with the authors' reasons inline -----------------
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds_matches_input_ids(self):
    pass

@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
    pass

@unittest.skip(reason="Compile not yet supported in idefics3 models")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
    pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
    """Override of the common resize test: after shrinking the vocab, the
    input ids are re-clamped and the image token is re-pinned to the last
    positions so the forward pass stays valid."""
    (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        config = copy.deepcopy(original_config)
        model = model_class(config)
        model.to(torch_device)
        if self.model_tester.is_training is False:
            model.eval()
        model_vocab_size = config.text_config.vocab_size
        # Retrieve the embeddings and clone them
        model_embed = model.resize_token_embeddings(model_vocab_size)
        cloned_embeddings = model_embed.weight.clone()
        # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
        model_embed = model.resize_token_embeddings(model_vocab_size + 10)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
        # Check that it actually resizes the embeddings matrix
        self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        model(**self._prepare_for_class(inputs_dict, model_class))
        # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
        model_embed = model.resize_token_embeddings(model_vocab_size - 15)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
        # Check that it actually resizes the embeddings matrix
        self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
        # Ignore copy
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
        inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
        n_images = self.model_tester.num_images * self.model_tester.seq_length
        model.image_token_id = model_vocab_size - 15 - 1
        inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
        # make sure that decoder_input_ids are resized as well
        if "decoder_input_ids" in inputs_dict:
            inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
        model(**self._prepare_for_class(inputs_dict, model_class))
        # Check that adding and removing tokens has not modified the first part of the embedding matrix.
        models_equal = True
        for p1, p2 in zip(cloned_embeddings, model_embed.weight):
            if p1.data.ne(p2.data).sum() > 0:
                models_equal = False
        self.assertTrue(models_equal)

        config = copy.deepcopy(original_config)
        model = model_class(config)
        model.to(torch_device)
        model_vocab_size = config.text_config.vocab_size
        model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
        # NOTE(review): the `assertTrue(a, b)` calls below pass `b` as the
        # failure *message*, so they always succeed for truthy `a`; they read
        # as intended `assertEqual`s — confirm actual values before tightening.
        self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
        model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0] // 64, 0)
        self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
        self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
        model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0] // 64, 0)
        # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
        target_dimension = 128
        model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0], target_dimension)
        # NOTE(review): the regex below (including "not and integer") must
        # match the library's raised message verbatim — don't "fix" the typo
        # here without checking the modeling code first.
        with self.assertRaisesRegex(
            ValueError,
            "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
        ):
            model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
    """Untied-embeddings variant of the resize override: the output embeddings
    (and bias, when present) must track the new vocab size, and image tokens
    are re-pinned after shrinking."""
    (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
    original_config.tie_word_embeddings = False
    for model_class in self.all_model_classes:
        config = copy.deepcopy(original_config)
        model = model_class(config).to(torch_device)
        # if no output embeddings -> leave test
        if model.get_output_embeddings() is None:
            continue
        # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
        model_vocab_size = config.text_config.vocab_size
        model.resize_token_embeddings(model_vocab_size + 10)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
        output_embeds = model.get_output_embeddings()
        self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
        # Check bias if present
        if output_embeds.bias is not None:
            self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        model(**self._prepare_for_class(inputs_dict, model_class))
        # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
        model.resize_token_embeddings(model_vocab_size - 15)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
        # Check that it actually resizes the embeddings matrix
        output_embeds = model.get_output_embeddings()
        self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
        # Check bias if present
        if output_embeds.bias is not None:
            self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
        inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
        n_images = self.model_tester.num_images * self.model_tester.seq_length
        model.image_token_id = model_vocab_size - 15 - 1
        inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class Idefics3ForConditionalGenerationModelTest(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase):
    """
    Model tester for `Idefics3ForConditionalGeneration`.
    """

    all_model_classes = (Idefics3ForConditionalGeneration,) if is_torch_available() else ()
    # FIX: the torch-less fallback for a *mapping* attribute is an empty dict,
    # not an empty tuple (matches the convention used by other model tests).
    pipeline_model_mapping = {"image-text-to-text": Idefics3ForConditionalGeneration} if is_torch_available() else {}
    # Common-test toggles for this architecture.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    test_torchscript = False
def setUp(self):
    """Create the shared model tester and an `Idefics3Config` config tester."""
    self.model_tester = Idefics3VisionText2TextModelTester(self)
    self.config_tester = ConfigTester(self, config_class=Idefics3Config, has_text_modality=False)
@unittest.skip(reason="input_embeds cannot be passed in without input_ids")
def test_inputs_embeds(self):
    # FIX: this override was defined without `self`. Harmless while skipped
    # (the unittest.skip wrapper never calls the original body) but wrong as
    # an instance method; now matches the sibling overrides' signatures.
    pass
# ---- Skipped common tests, with the authors' reasons inline -----------------
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
    pass

@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate(self):
    pass

@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate_dict_outputs_use_cache(self):
    pass

@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate_low_memory(self):
    pass

@unittest.skip(
    reason="Prompt lookup decoding needs a way to indicate `bad_word_ids` that should not be suggested as candidates"
)
def test_prompt_lookup_decoding_matches_greedy_search(self):
    pass

@pytest.mark.generate
@slow
@unittest.skip(
    reason="Idefics3 doesn't support SDPA for all backbones, vision backbones has only eager/FA2 attention"
)
def test_eager_matches_sdpa_generate(self):
    pass

@unittest.skip(reason="Compile not yet supported in Idefics3 models end-to-end")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
    pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
    """Override of the common resize test for the conditional-generation head:
    after shrinking the vocab, input ids are re-clamped and the image token is
    re-pinned (note it lives on the inner `model.model` here)."""
    (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        config = copy.deepcopy(original_config)
        model = model_class(config)
        model.to(torch_device)
        model_vocab_size = config.text_config.vocab_size
        # Retrieve the embeddings and clone them
        model_embed = model.resize_token_embeddings(model_vocab_size)
        cloned_embeddings = model_embed.weight.clone()
        # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
        model_embed = model.resize_token_embeddings(model_vocab_size + 10)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
        # Check that it actually resizes the embeddings matrix
        self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        model(**self._prepare_for_class(inputs_dict, model_class))
        # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
        model_embed = model.resize_token_embeddings(model_vocab_size - 15)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
        # Check that it actually resizes the embeddings matrix
        self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
        # Check that the model can still do a forward pass successfully (every parameter should be resized)
        # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
        inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
        n_images = self.model_tester.num_images * self.model_tester.seq_length
        model.model.image_token_id = model_vocab_size - 15 - 1
        inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
        model(**self._prepare_for_class(inputs_dict, model_class))
        # Check that adding and removing tokens has not modified the first part of the embedding matrix.
        models_equal = True
        for p1, p2 in zip(cloned_embeddings, model_embed.weight):
            if p1.data.ne(p2.data).sum() > 0:
                models_equal = False
        self.assertTrue(models_equal)

        config = copy.deepcopy(original_config)
        model = model_class(config)
        model.to(torch_device)
        model_vocab_size = config.text_config.vocab_size
        model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
        # NOTE(review): the `assertTrue(a, b)` calls below pass `b` as the
        # failure *message*, so they always succeed for truthy `a`; they read
        # as intended `assertEqual`s — confirm actual values before tightening.
        self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size)
        model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0] // 64, 0)
        self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size)
        self.assertTrue(model.config.text_config.vocab_size, model.vocab_size)
        model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0] // 64, 0)
        # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
        target_dimension = 128
        model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
        self.assertTrue(model_embed.weight.shape[0], target_dimension)
        # NOTE(review): the regex below (including "not and integer") must
        # match the library's raised message verbatim — don't "fix" the typo
        # here without checking the modeling code first.
        with self.assertRaisesRegex(
            ValueError,
            "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
        ):
            model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class Idefics3ForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow end-to-end generation tests against the released Idefics3-8B-Llama3 checkpoint.

    Both tests are currently disabled via `unittest.skip` ("multi-gpu tests are disabled for now")
    and require network access to download the processor, model, and test photographs.
    """

    def setUp(self):
        # Processor plus three real photographs fetched over HTTP:
        # Statue of Liberty, Chicago skyline, and the Golden Gate Bridge.
        self.processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
        self.image1 = Image.open(
            BytesIO(
                requests.get(
                    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                ).content
            )
        )
        self.image2 = Image.open(
            BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content)
        )
        self.image3 = Image.open(
            BytesIO(
                requests.get(
                    "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
                ).content
            )
        )

    def tearDown(self):
        # Release accelerator memory between tests (the 8B model is large).
        cleanup(torch_device, gc_collect=True)

    @slow
    @unittest.skip("multi-gpu tests are disabled for now")
    def test_integration_test(self):
        """Single-image generation in bfloat16: decode 10 new tokens and compare to a pinned transcript."""
        model = Idefics3ForConditionalGeneration.from_pretrained(
            "HuggingFaceM4/Idefics3-8B-Llama3",
            dtype=torch.bfloat16,
            device_map="auto",
        )

        # Create inputs
        text = "<image>In this image, we see"
        images = self.image1
        inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True)
        inputs.to(torch_device)

        generated_ids = model.generate(**inputs, max_new_tokens=10)
        generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)

        # Pinned expected continuation for the first (and only) prompt.
        expected_generated_text = "<image>In this image, we see the Statue of Liberty, which is located on Liberty"
        self.assertEqual(generated_texts[0], expected_generated_text)

    @slow
    @require_bitsandbytes
    @unittest.skip("multi-gpu tests are disabled for now")
    def test_integration_test_4bit(self):
        """Batched multi-image generation with 4-bit quantization; only the first transcript is checked."""
        # Let' s make sure we test the preprocessing to replace what is used
        model = Idefics3ForConditionalGeneration.from_pretrained(
            "HuggingFaceM4/Idefics3-8B-Llama3",
            load_in_4bit=True,
            device_map="auto",
        )

        # Create pixel inputs: one prompt with a single image, one with two images.
        text = ["<image>In this image, we see", "bla, bla <image><image>"]
        images = [[self.image1], [self.image2, self.image3]]
        inputs = self.processor(text=text, images=images, padding=True, return_tensors="pt")

        generated_ids = model.generate(**inputs, max_new_tokens=10)
        generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)

        expected_generated_text = "<image>In this image, we see the Statue of Liberty, trees, buildings, water"
        self.assertEqual(generated_texts[0], expected_generated_text)
| transformers/tests/models/idefics3/test_modeling_idefics3.py/0 | {
"file_path": "transformers/tests/models/idefics3/test_modeling_idefics3.py",
"repo_id": "transformers",
"token_count": 10963
} | 564 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
import requests
from packaging import version
from transformers.testing_utils import require_torch, require_torch_accelerator, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import Kosmos2_5ImageProcessor
if is_torchvision_available():
from transformers import Kosmos2_5ImageProcessorFast
class Kosmos2_5ImageProcessingTester:
    """Holds image-processor settings and builds synthetic inputs for the Kosmos2_5 tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        # Default target size is 20x20 when the caller does not provide one.
        self.size = {"height": 20, "width": 20} if size is None else size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by the call tests.
        self.max_patches = [512, 1024, 2048, 4096]
        # Default patch geometry is 16x16 when the caller does not provide one.
        self.patch_size = {"height": 16, "width": 16} if patch_size is None else patch_size

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processors under test."""
        return {
            "do_normalize": self.do_normalize,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_dummy_image(self):
        """Download a real photograph and return it as an RGB PIL image."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        return Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Build a batch of random test images (PIL, numpy, or torch depending on the flags)."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class Kosmos2_5ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Tests for the slow and fast Kosmos2_5 image processors with the default 3-channel inputs."""

    # Classes under test; left as None when the optional backend is unavailable so the mixin skips.
    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        # Kwargs shared by the slow and fast processor instantiations.
        return self.image_processor_tester.prepare_image_processor_dict()

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        """Slow and fast processors must produce near-identical flattened patches for one real image."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = Image.open(
            requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
        )
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")

        # Loose element-wise tolerance plus a tight bound on the mean absolute difference.
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        """Same as `test_slow_fast_equivalence` but over a full batch of random images."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        # Slow and fast center_crop implementations diverge, so batched comparison would be unfair.
        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")

        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting + fix this compile test.
    @unittest.skip("Failing with `AttributeError: 'StrictLessThan' object has no attribute 'diff'`.")
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        """Eager and torch.compile'd fast processors must agree (currently skipped, see decorator)."""
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")

        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")

        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")

        # NOTE(review): this compares `pixel_values`, but the Kosmos2_5 processors emit
        # `flattened_patches` (see the equivalence tests above) — presumably this needs to be
        # switched over before the skip is lifted; confirm when fixing the compile failure.
        self._assert_slow_fast_tensors_equivalence(
            output_eager.pixel_values, output_compiled.pixel_values, atol=1e-4, rtol=1e-4, mean_atol=1e-5
        )

    @unittest.skip(
        reason="Kosmos2_5ImageProcessor already uses many torch operations. Fast image processor only works faster with sufficiently large batch size on GPU."
    )
    def test_fast_is_faster_than_slow(self):
        super().test_fast_is_faster_than_slow()

    def test_image_processor_properties(self):
        """The processor exposes its normalization/RGB-conversion flags as attributes."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        """Pin the mean of the flattened patches for a real image at max_patches=2048."""
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # 0.0606 is a regression value recorded from a known-good run.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        """Output shape checks for PIL inputs, single and batched, across all patch budgets."""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        # Per-patch feature dim: patch_h * patch_w * channels, plus 2 extra features per patch
        # (presumably row/column coordinates — confirm against the processor implementation).
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        """Output shape checks for numpy inputs, single and batched, across all patch budgets."""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy_4_channels(self):
        """Shape checks for 4-channel numpy inputs (channels_last); restores 3 channels afterwards."""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)

        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # With 4 channels the per-patch feature dim grows accordingly (no RGB conversion applied here).
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
        # Reset so later tests in this class see the default channel count again.
        self.image_processor_tester.num_channels = 3

    def test_call_pytorch(self):
        """Output shape checks for torch-tensor inputs, single and batched, across all patch budgets."""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@require_torch
@require_vision
class Kosmos2_5ImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
    """Image-processing tests run with 4-channel (RGBA) inputs.

    Most inherited tests are skipped because the slow Kosmos2_5 processor does not support
    4 channels yet; the overrides that remain active check PIL handling and basic properties.
    """

    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        # 4-channel inputs; with `do_convert_rgb=True` the processor is expected to emit 3 channels.
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        # Kwargs shared by the slow and fast processor instantiations.
        return self.image_processor_tester.prepare_image_processor_dict()

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        """Slow and fast processors must produce near-identical flattened patches (currently skipped)."""
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = Image.open(
            requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
        )
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")

        # Loose element-wise tolerance plus a tight bound on the mean absolute difference.
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_slow_fast_equivalence_batched(self):
        return super().test_slow_fast_equivalence_batched()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_can_compile_fast_image_processor(self):
        return super().test_can_compile_fast_image_processor()

    @unittest.skip(
        reason="Kosmos2_5ImageProcessor already uses many torch operations. Fast image processor only works faster with sufficiently large batch size on GPU."
    )
    def test_fast_is_faster_than_slow(self):
        super().test_fast_is_faster_than_slow()

    def test_image_processor_properties(self):
        """The processor exposes its normalization/RGB-conversion flags as attributes."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        """Shape checks for 4-channel PIL inputs: the RGB conversion drops one channel."""
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        # `num_channels - 1` because do_convert_rgb reduces RGBA to RGB; the extra +2 features per
        # patch are appended by the processor (presumably row/col coordinates — confirm in the impl).
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_numpy(self):
        return super().test_call_numpy()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_pytorch(self):
        return super().test_call_pytorch()

    @unittest.skip(
        reason="Kosmos2_5ImageProcessor does not treat numpy and PIL 4 channel images consistently"
    )  # FIXME Amy
    def test_call_numpy_4_channels(self):
        # Fixed: previously delegated to `super().test_call_pytorch()` (copy-paste error); the
        # override must defer to the matching parent test so the right path runs once unskipped.
        return super().test_call_numpy_4_channels()
| transformers/tests/models/kosmos2_5/test_image_processing_kosmos2_5.py/0 | {
"file_path": "transformers/tests/models/kosmos2_5/test_image_processing_kosmos2_5.py",
"repo_id": "transformers",
"token_count": 8071
} | 565 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Llava-NeXT model."""
import copy
import unittest
import requests
from huggingface_hub import hf_hub_download
from parameterized import parameterized
from transformers import (
AutoProcessor,
LlavaNextConfig,
LlavaNextForConditionalGeneration,
LlavaNextModel,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_bitsandbytes,
require_torch,
slow,
torch_device,
)
from transformers.utils import check_torch_load_is_safe
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
from transformers.models.llava_next.modeling_llava_next import image_size_to_num_patches
if is_vision_available():
from PIL import Image
class LlavaNextVisionText2TextModelTester:
    """Builds a tiny LlavaNext config and matching dummy inputs for the model tests below."""

    def __init__(
        self,
        parent,
        ignore_index=-100,
        image_token_index=0,
        projector_hidden_act="gelu",
        seq_length=7,
        vision_feature_select_strategy="default",
        vision_feature_layer=-1,
        # NOTE: mutable default dicts are shared across instances; fine here because the tester
        # never mutates them, but new code should not copy this pattern.
        text_config={
            "model_type": "llama",
            "seq_length": 7,
            "is_training": True,
            "use_input_mask": True,
            "use_token_type_ids": False,
            "use_labels": True,
            "vocab_size": 99,
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "attention_probs_dropout_prob": 0.1,
            "max_position_embeddings": 580,
            "type_vocab_size": 16,
            "type_sequence_label_size": 2,
            "initializer_range": 0.02,
            "num_labels": 3,
            "num_choices": 4,
            "pad_token_id": 1,
        },
        is_training=True,
        vision_config={
            "image_size": 8,
            "patch_size": 4,
            "num_channels": 3,
            "is_training": True,
            "hidden_size": 32,
            "projection_dim": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "dropout": 0.1,
            "attention_dropout": 0.1,
            "initializer_range": 0.02,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.text_config = text_config
        self.vision_config = vision_config
        # Convenience mirrors of text-config values used by the common test mixins.
        self.pad_token_id = text_config["pad_token_id"]
        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]
        self.is_training = is_training
        self.batch_size = 3
        self.num_channels = 3
        self.image_size = 30
        self.image_grid_pinpoints = [[16, 16]]
        self.num_image_tokens = 24
        # Total sequence length = text tokens plus the expanded image tokens.
        self.seq_length = seq_length + self.num_image_tokens
        self.encoder_seq_length = self.seq_length

    def get_config(self):
        """Assemble a small LlavaNextConfig from the tester's settings."""
        return LlavaNextConfig(
            text_config=self.text_config,
            vision_config=self.vision_config,
            ignore_index=self.ignore_index,
            image_token_index=self.image_token_index,
            projector_hidden_act=self.projector_hidden_act,
            vision_feature_select_strategy=self.vision_feature_select_strategy,
            vision_feature_layer=self.vision_feature_layer,
            image_grid_pinpoints=self.image_grid_pinpoints,
            image_seq_length=self.num_image_tokens,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) with random pixel data."""
        # pixel_values shape: (batch, num_patches, channels, H, W); the second dim is 5
        # (presumably base image + grid patches — confirm against image_size_to_num_patches).
        pixel_values = floats_tensor(
            [
                self.batch_size,
                5,
                self.vision_config["num_channels"],
                self.vision_config["image_size"],
                self.vision_config["image_size"],
            ]
        )
        config = self.get_config()

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) shaped for the common ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)

        # Remove any accidental image-token collisions, then place the image tokens at the start.
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_index

        inputs_dict = {
            "pixel_values": pixel_values,
            "image_sizes": torch.tensor(
                [[self.vision_config["image_size"], self.vision_config["image_size"]]] * self.batch_size
            ),
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class LlavaNextForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `LlavaNextForConditionalGeneration`.
    """

    all_model_classes = (
        (
            LlavaNextModel,
            LlavaNextForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"image-text-to-text": LlavaNextForConditionalGeneration} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    # Model is built from separate vision + text sub-configs.
    _is_composite = True

    def setUp(self):
        self.model_tester = LlavaNextVisionText2TextModelTester(self)
        common_properties = ["image_token_index", "vision_feature_layer", "image_seq_length"]
        self.config_tester = ConfigTester(
            self, config_class=LlavaNextConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_config(self):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def test_initialization(self):
        """With zero-init config, every trainable parameter mean should round to 0 or 1."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # `image_newline` is deliberately excluded from the zero-init check.
                if "image_newline" in name:
                    continue
                elif param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_mismatching_num_image_tokens(self):
        """
        Tests that VLMs throw an error with explicit message saying what is wrong
        when number of images don't match number of image tokens in the text.
        Also we need to test multi-image cases when one prompt has multiple image tokens.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            curr_input_dict = copy.deepcopy(input_dict)  # in-place modifications further
            _ = model(**curr_input_dict)  # successful forward with no modifications

            # remove one image but leave the image token in text
            curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
            curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
            with self.assertRaises(ValueError):
                _ = model(**curr_input_dict)

            # simulate multi-image case by concatenating inputs where each has exactly one image/image-token
            input_ids = curr_input_dict["input_ids"][:1]
            pixel_values = curr_input_dict["pixel_values"][:1]
            image_sizes = curr_input_dict["image_sizes"][:1]
            input_ids = torch.cat([input_ids, input_ids], dim=0)

            # one image and two image tokens raise an error
            with self.assertRaises(ValueError):
                _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)

            # two images and two image tokens don't raise an error
            pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
            image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
            _ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)

    def test_odd_sized_image(self):
        """A non-square, odd-dimensioned `image_sizes` entry must not break the forward pass."""
        # prepare model configuration
        config = self.model_tester.get_config()

        # prepare input
        num_image_tokens = 24
        pixel_values = floats_tensor([1, 5, 3, config.vision_config.image_size, config.vision_config.image_size])
        input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2
        input_ids[:, :num_image_tokens] = config.image_token_index
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
        inputs_dict = {
            "pixel_values": pixel_values,
            "image_sizes": torch.tensor([[13, 16]]),  # odd-sized image
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }

        # forward with odd-sized image input
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model(**inputs_dict)

    @parameterized.expand(
        [
            (-1,),
            ([-1],),
            ([-1, -2],),
        ],
    )
    def test_vision_feature_layers(self, vision_feature_layer):
        """
        Test that we can use either one vision feature layer, or a list of
        vision feature layers.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.vision_feature_layer = vision_feature_layer

        # The projector input width scales with the number of selected feature layers.
        num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
        hidden_size = config.vision_config.hidden_size
        expected_features = hidden_size * num_feature_layers

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            # We should have the right number of input features,
            # and should be able to run a forward pass without exploding
            base_model = getattr(model, "model", model)
            assert base_model.multi_modal_projector.linear_1.in_features == expected_features
            model(**input_dict)

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(
        "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
    )
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass
@require_torch
class LlavaNextForConditionalGenerationIntegrationTest(unittest.TestCase):
    """Slow integration tests that run the real llava-v1.6-mistral-7b checkpoint
    and compare inputs and greedy generations against reference values.

    NOTE(review): expected strings are pinned to specific checkpoints and 4-bit
    quantization; any change to the checkpoint or bitsandbytes kernels can shift
    them — confirm on the CI hardware before updating.
    """

    def setUp(self):
        # Processor, reference image and prompt shared by all tests below.
        self.processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
        url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
        self.image = Image.open(requests.get(url, stream=True).raw)
        self.prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"

    def tearDown(self):
        # Release accelerator memory between tests.
        cleanup(torch_device, gc_collect=True)

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test(self):
        """Single image: check processed inputs against the original repo's tensors
        and pin the greedy generation."""
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf",
            load_in_4bit=True,
        )

        inputs = self.processor(images=self.image, text=self.prompt, return_tensors="pt").to(torch_device)

        # verify inputs against original implementation
        filepath = hf_hub_download(
            repo_id="nielsr/test-image",
            filename="llava_1_6_input_ids.pt",
            repo_type="dataset",
        )
        check_torch_load_is_safe()
        original_input_ids = torch.load(filepath, map_location="cpu", weights_only=True)
        # replace -200 by image_token_index (since we use token ID = 32000 for the image token)
        # remove image token indices because HF impl expands image tokens `image_seq_length` times
        original_input_ids = original_input_ids[original_input_ids != -200]
        observed_input_ids = inputs.input_ids[inputs.input_ids != model.config.image_token_index]
        assert original_input_ids[0].tolist() == observed_input_ids[0].tolist()

        filepath = hf_hub_download(
            repo_id="nielsr/test-image",
            filename="llava_1_6_pixel_values.pt",
            repo_type="dataset",
        )
        check_torch_load_is_safe()
        original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True)
        assert torch.allclose(
            original_pixel_values, inputs.pixel_values.to(device="cpu", dtype=original_pixel_values.dtype)
        )

        # verify generation
        output = model.generate(**inputs, max_new_tokens=100)
        EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays values for multiple quantitative variables represented on axes starting from the same point. This particular radar chart is showing the performance of various models or systems across different metrics or datasets.\n\nThe chart is divided into several sections, each representing a different model or dataset. The axes represent different metrics or datasets, such as "MMM-Vet," "MMM-Bench," "L'
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test_batch(self):
        """Batched generation with two different images should decode as expected."""
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf", load_in_4bit=True
        )
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        cats_image = Image.open(requests.get(url, stream=True).raw)

        inputs = self.processor(
            images=[self.image, cats_image],
            text=[self.prompt, self.prompt],
            return_tensors="pt",
            padding=True,
        ).to(torch_device)

        # it should not matter whether two images are the same size or not
        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = ['[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays', '[INST] \nWhat is shown in this image? [/INST] The image shows two cats lying on a pink surface, which appears to be a couch or a cush'] # fmt: skip
        self.assertEqual(
            self.processor.batch_decode(output, skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test_unk_token(self):
        # related to (#29835)
        # An <unk> token in the prompt must not break forward or generate.
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf",
            load_in_4bit=True,
        )

        prompt_with_unk = "[INST] <image>\nWhat is shown in this <unk> image? [/INST]"
        inputs = self.processor(images=self.image, text=prompt_with_unk, return_tensors="pt")

        # verify single forward pass
        inputs = inputs.to(torch_device)
        with torch.no_grad():
            output = model(**inputs)

        # verify generation
        output = model.generate(**inputs, max_new_tokens=40)
        EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays values for multiple quantitative variables represented on axes starting from the same point. This particular radar chart' # fmt: skip
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test_batch_different_resolutions(self):
        """Images with different resolutions must be patch-padded correctly and
        still generate the expected text."""
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf",
            load_in_4bit=True,
        )
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e"
        cats_image = Image.open(requests.get(url, stream=True).raw)
        lowres_img = Image.open(requests.get(lowres_url, stream=True).raw)

        inputs = self.processor(
            images=[lowres_img, cats_image], text=[self.prompt, self.prompt], return_tensors="pt", padding=True
        ).to(torch_device)
        pixel_values = inputs["pixel_values"]

        # verify pixel values are padded correctly with 0 when one image has more num_patches than the other
        image_num_patches = [
            image_size_to_num_patches(
                image_size=imsize,
                grid_pinpoints=model.config.image_grid_pinpoints,
                patch_size=model.config.vision_config.image_size,
            )
            for imsize in inputs["image_sizes"]
        ]
        for pix_val, num_patch in zip(pixel_values, image_num_patches):
            self.assertTrue(torch.all(pix_val[num_patch:] == 0))  # pad on the right
            for i in range(num_patch):
                self.assertFalse(torch.all(pix_val[i : i + 1] == 0))  # no padding expected in any of patches

        # verify generation
        output = model.generate(**inputs, max_new_tokens=50)
        EXPECTED_DECODED_TEXT = "[INST] \nWhat is shown in this image? [/INST] The image shows two deer, likely fawns, in a grassy area with trees in the background. The setting appears to be a forest or woodland, and the photo is taken during what seems to be either dawn or dusk, given"
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test_batch_matches_single(self):
        """A batched run must produce the same decode for an item as a single run."""
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf",
            load_in_4bit=True,
        )
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e"
        cats_image = Image.open(requests.get(url, stream=True).raw)
        lowres_img = Image.open(requests.get(lowres_url, stream=True).raw)

        inputs_batched = self.processor(
            images=[lowres_img, cats_image], text=[self.prompt, self.prompt], return_tensors="pt", padding=True
        ).to(torch_device)

        inputs_single = self.processor(images=lowres_img, text=self.prompt, return_tensors="pt", padding=True).to(
            torch_device
        )

        # verify generation
        output_batched = model.generate(**inputs_batched, max_new_tokens=50)
        output_single = model.generate(**inputs_single, max_new_tokens=50)
        self.assertEqual(
            self.processor.decode(output_batched[0], skip_special_tokens=True),
            self.processor.decode(output_single[0], skip_special_tokens=True),
        )

    @slow
    @require_bitsandbytes
    def test_small_model_integration_test_full_vision_state_selection(self):
        """Switching the vision feature selection strategy to "full" must not error."""
        model = LlavaNextForConditionalGeneration.from_pretrained(
            "llava-hf/llava-v1.6-mistral-7b-hf",
            load_in_4bit=True,
        )
        # test that changing `strategy` won't error out
        model.vision_feature_select_strategy = "full"

        inputs = self.processor(text=self.prompt, images=self.image, return_tensors="pt").to(model.device)

        # verify generation
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = '[INST] \nWhat is shown in this image? [/INST] The image appears to be a radar chart, which is a type of multi-dimensional plot that displays values for multiple quantitative variables represented on axes' # fmt: skip
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )

    @slow
    def test_granite_vision(self):
        """
        Check the expected output of a granite vision model, which leverages
        multiple vision feature layers and a visual encoder with no CLS (siglip).
        """
        granite_model_path = "ibm-granite/granite-vision-3.1-2b-preview"
        model = LlavaNextForConditionalGeneration.from_pretrained(granite_model_path)
        # Granite uses its own chat template, so replace the shared processor.
        self.processor = AutoProcessor.from_pretrained(granite_model_path)
        prompt = "<|user|>\n<image>\nWhat is shown in this image?\n<|assistant|>\n"
        inputs = self.processor(text=prompt, images=self.image, return_tensors="pt").to(model.device)

        # verify generation
        output = model.generate(**inputs, max_new_tokens=30)
        EXPECTED_DECODED_TEXT = "<|user|>\n\nWhat is shown in this image?\n<|assistant|>\nThe image displays a radar chart comparing the performance of various machine learning models." # fmt: skip
        self.assertEqual(
            self.processor.decode(output[0], skip_special_tokens=True),
            EXPECTED_DECODED_TEXT,
        )
| transformers/tests/models/llava_next/test_modeling_llava_next.py/0 | {
"file_path": "transformers/tests/models/llava_next/test_modeling_llava_next.py",
"repo_id": "transformers",
"token_count": 10134
} | 566 |
# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Mistral model."""
import gc
import unittest
import pytest
from packaging import version
from parameterized import parameterized
from transformers import AutoTokenizer, DynamicCache, MistralConfig, is_torch_available, set_seed
from transformers.cache_utils import DynamicSlidingWindowLayer
from transformers.testing_utils import (
DeviceProperties,
Expectations,
backend_empty_cache,
cleanup,
get_device_properties,
require_bitsandbytes,
require_flash_attn,
require_read_token,
require_torch,
require_torch_accelerator,
require_torch_gpu,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
MistralForCausalLM,
MistralForQuestionAnswering,
MistralForSequenceClassification,
MistralForTokenClassification,
MistralModel,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
class MistralModelTester(CausalLMModelTester):
    """Maps the generic causal-LM test harness onto the Mistral model classes."""

    config_class = MistralConfig
    # Model classes are only resolvable when torch is installed.
    if is_torch_available():
        base_model_class = MistralModel
        causal_lm_class = MistralForCausalLM
        sequence_class = MistralForSequenceClassification
        token_class = MistralForTokenClassification
        question_answering_class = MistralForQuestionAnswering
@require_torch
class MistralModelTest(CausalLMModelTest, unittest.TestCase):
    """Common (fast) model tests for Mistral, driven by the shared causal-LM harness."""

    all_model_classes = (
        (
            MistralModel,
            MistralForCausalLM,
            MistralForSequenceClassification,
            MistralForTokenClassification,
            MistralForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task name -> model class used by the pipeline test suite.
    pipeline_model_mapping = (
        {
            "feature-extraction": MistralModel,
            "text-classification": MistralForSequenceClassification,
            "token-classification": MistralForTokenClassification,
            "text-generation": MistralForCausalLM,
            "question-answering": MistralForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    model_tester_class = MistralModelTester

    # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        # All pipeline tests are currently skipped for Mistral (see TODO above).
        return True

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        self.skipTest(reason="Mistral flash attention does not support right padding")
@require_torch_accelerator
@require_read_token
class MistralIntegrationTest(unittest.TestCase):
    """Slow integration tests against the real mistralai/Mistral-7B-v0.1 checkpoint.

    Expected values are hardware-dependent; see `device_properties` below.
    """

    # This variable is used to determine which accelerator are we using for our runners (e.g. A10 or T4)
    # Depending on the hardware we get different logits / generations
    device_properties: DeviceProperties = (None, None, None)

    @classmethod
    def setUpClass(cls):
        cls.device_properties = get_device_properties()

    def setUp(self):
        # Free accelerator memory before and after each test.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_model_7b_logits(self):
        """Forward pass only: compare mean logits and a logit slice to
        per-hardware reference values."""
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)

        # ("cuda", 8) for A100/A10, and ("cuda", 7) 7 for T4.
        # considering differences in hardware processing and potential deviations in output.
        # fmt: off
        EXPECTED_SLICES = Expectations(
            {
                ("cuda", 7): torch.tensor([-5.8828, -5.8633, -0.1042, -4.7266, -5.8828, -5.8789, -5.8789, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -1.0801, 1.7598, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828]),
                ("cuda", 8): torch.tensor([-5.8711, -5.8555, -0.1050, -4.7148, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -1.0781, 1.7568, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711]),
                ("rocm", 9): torch.tensor([-5.8750, -5.8594, -0.1047, -4.7188, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -1.0781, 1.7578, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750]),
            }
        )
        # fmt: on
        expected_slice = EXPECTED_SLICES.get_expectation()
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-4, rtol=1e-4)

    @slow
    @require_bitsandbytes
    def test_model_7b_generation(self):
        """Greedy generation in 4-bit must match the pinned completion."""
        EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% ketchup. I’m not a fan of mustard, mayo,"
        prompt = "My favourite condiment is "
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
        model = MistralForCausalLM.from_pretrained(
            "mistralai/Mistral-7B-v0.1", device_map={"": torch_device}, load_in_4bit=True
        )
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @slow
    def test_model_7b_dola_generation(self):
        # ground truth text generated with dola_layers="low", repetition_penalty=1.2
        EXPECTED_TEXT_COMPLETION = (
            """My favourite condiment is 100% ketchup. I love it on everything, and I’m not ash"""
        )
        prompt = "My favourite condiment is "
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
        model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(
            input_ids, max_new_tokens=20, temperature=0, dola_layers="low", repetition_penalty=1.2
        )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

        # Explicitly free the 7B model before the next test.
        del model
        backend_empty_cache(torch_device)
        gc.collect()

    @require_flash_attn
    @require_bitsandbytes
    @slow
    @pytest.mark.flash_attn_test
    def test_model_7b_long_prompt(self):
        """Generation beyond the sliding window with flash-attention-2."""
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = MistralForCausalLM.from_pretrained(
            "mistralai/Mistral-7B-v0.1",
            device_map={"": torch_device},
            load_in_4bit=True,
            attn_implementation="flash_attention_2",
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        # Assisted generation
        # NOTE(review): `assistant_model` is configured here but never passed to
        # `generate` — presumably intended as
        # `model.generate(..., assistant_model=assistant_model)`; confirm upstream.
        assistant_model = model
        assistant_model.generation_config.num_assistant_tokens = 2
        assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

    @slow
    def test_model_7b_long_prompt_sdpa(self):
        """Same long-prompt check as above but with the SDPA attention backend."""
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = MistralForCausalLM.from_pretrained(
            "mistralai/Mistral-7B-v0.1", device_map="auto", attn_implementation="sdpa", dtype=torch.float16
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        # Assisted generation
        # NOTE(review): as in the flash-attn test, `assistant_model` is set up
        # but not forwarded to `generate` — confirm upstream.
        assistant_model = model
        assistant_model.generation_config.num_assistant_tokens = 2
        assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        del assistant_model
        backend_empty_cache(torch_device)
        gc.collect()

        # Short-prompt greedy generation with the same SDPA model.
        EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I’m not a big"""
        prompt = "My favourite condiment is "
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)

        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @slow
    def test_speculative_generation(self):
        """Speculative decoding using the model as its own assistant."""
        EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% Sriracha. I love it on everything. I have it on my"
        prompt = "My favourite condiment is "
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
        model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        set_seed(0)
        generated_ids = model.generate(
            input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=model
        )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @pytest.mark.torch_compile_test
    @slow
    def test_compile_static_cache(self):
        """Dynamic, static and sliding-window caches — eager and compiled — must
        all produce the same greedy completion."""
        # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
        # work as intended. See https://github.com/pytorch/pytorch/issues/121943
        if version.parse(torch.__version__) < version.parse("2.3.0"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        if self.device_properties[0] == "cuda" and self.device_properties[1] == 7:
            self.skipTest(reason="This test is failing (`torch.compile` fails) on Nvidia T4 GPU.")

        NUM_TOKENS_TO_GENERATE = 40
        EXPECTED_TEXT_COMPLETION = [
            "My favourite condiment is 100% ketchup. I love it on everything. "
            "I’m not a big fan of mustard, mayo, or relish. I’m not a fan of pickles"
        ]

        prompts = ["My favourite condiment is "]
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
        tokenizer.pad_token = tokenizer.eos_token
        model = MistralForCausalLM.from_pretrained(
            "mistralai/Mistral-7B-v0.1", device_map=torch_device, dtype=torch.float16
        )
        inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)

        # Dynamic Cache
        generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
        dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)

        # Static Cache
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)

        # Sliding Window Cache
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="sliding_window"
        )
        static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)

        # Static Cache + compile
        forward_function = model.__call__
        model.__call__ = torch.compile(forward_function, mode="reduce-overhead", fullgraph=True)
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
        )
        static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)

        # Sliding Window Cache + compile
        # Reset the compiler state so the second compile starts clean.
        torch._dynamo.reset()
        model.__call__ = torch.compile(forward_function, mode="reduce-overhead", fullgraph=True)
        generated_ids = model.generate(
            **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="sliding_window"
        )
        static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)

    @parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)])
    @require_flash_attn
    @slow
    def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str):
        """Test that we can correctly generate beyond the sliding window. This is non-trivial as Mistral will use
        a DynamicCache with only sliding layers."""
        model_id = "mistralai/Mistral-7B-v0.1"
        EXPECTED_COMPLETIONS = [
            "This is a nice place. This is a nice place. This is a nice place. This is",
            ", green, yellow, orange, purple, pink, brown, black, white, gray, silver",
        ]

        input_text = [
            "This is a nice place. " * 800 + "I really enjoy the scenery,",  # This is larger than 4096 tokens
            "A list of colors: red, blue",  # This will almost all be padding tokens
        ]
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
        tokenizer.pad_token_id = tokenizer.eos_token_id
        inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)

        model = MistralForCausalLM.from_pretrained(
            model_id, attn_implementation=attn_implementation, device_map=torch_device, dtype=torch.float16
        )

        # Make sure prefill is larger than sliding window
        input_size = inputs.input_ids.shape[-1]
        self.assertTrue(input_size > model.config.sliding_window)

        # Should already be Dynamic by default, but let's make sure!
        out = model.generate(**inputs, max_new_tokens=20, cache_implementation="dynamic", return_dict_in_generate=True)
        output_text = tokenizer.batch_decode(out.sequences[:, input_size:])
        self.assertEqual(output_text, EXPECTED_COMPLETIONS)

        # Let's check that the dynamic cache has hybrid layers!
        dynamic_cache = out.past_key_values
        self.assertTrue(isinstance(dynamic_cache, DynamicCache))
        for layer in dynamic_cache.layers:
            self.assertTrue(isinstance(layer, DynamicSlidingWindowLayer))
            self.assertEqual(layer.keys.shape[-2], model.config.sliding_window - 1)
@slow
@require_torch_accelerator
class Mask4DTestHard(unittest.TestCase):
    """Tests 4D custom attention masks: a single sequence with a shared prefix
    and three continuations must reproduce the logits of three separate lines."""

    model_name = "mistralai/Mistral-7B-v0.1"
    # Model is loaded once per class (see setUpClass) to amortize the cost.
    model = None
    model_dtype = None

    @classmethod
    def setUpClass(cls):
        cleanup(torch_device, gc_collect=True)
        if cls.model_dtype is None:
            cls.model_dtype = torch.float16
        if cls.model is None:
            cls.model = MistralForCausalLM.from_pretrained(cls.model_name, dtype=cls.model_dtype).to(torch_device)

    @classmethod
    def tearDownClass(cls):
        del cls.model_dtype
        del cls.model
        cleanup(torch_device, gc_collect=True)

    def setUp(self):
        cleanup(torch_device, gc_collect=True)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, use_fast=False)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def get_test_data(self):
        """Build the separate-lines batch and the shared-prefix single line,
        together with the 4D mask and position ids that make them equivalent."""
        template = "my favorite {}"
        items = ("pet is a", "artist plays a", "name is L")  # same number of tokens in each item

        batch_separate = [template.format(x) for x in items]  # 3 separate lines
        batch_shared_prefix = template.format(" ".join(items))  # 1 line with options concatenated

        input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device)
        input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device)

        # Rows 0-2 attend the shared prefix; each 3-token continuation attends
        # only the prefix plus itself.
        mask_shared_prefix = torch.tensor(
            [
                [
                    [
                        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                        [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                    ]
                ]
            ],
            device=torch_device,
        )

        position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device)

        # building custom positions ids based on custom mask
        position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1)
        # effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device)

        # inverting the mask
        min_dtype = torch.finfo(self.model_dtype).min
        mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype

        return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix

    def test_stacked_causal_mask(self):
        """Full shared-prefix forward with a 4D mask matches the separate batch."""
        (
            input_ids,
            position_ids,
            input_ids_shared_prefix,
            mask_shared_prefix,
            position_ids_shared_prefix,
        ) = self.get_test_data()

        # regular batch
        logits = self.model.forward(input_ids, position_ids=position_ids).logits
        logits_last = logits[:, -1, :]  # last tokens in each batch line
        decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]

        # single forward run with 4D custom mask
        logits_shared_prefix = self.model.forward(
            input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix
        ).logits
        logits_shared_prefix_last = logits_shared_prefix[
            0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
        ]  # last three tokens
        decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]

        self.assertEqual(decoded, decoded_shared_prefix)

    def test_partial_stacked_causal_mask(self):
        # Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks
        (
            input_ids,
            position_ids,
            input_ids_shared_prefix,
            mask_shared_prefix,
            position_ids_shared_prefix,
        ) = self.get_test_data()

        # regular batch
        logits = self.model.forward(input_ids, position_ids=position_ids).logits
        logits_last = logits[:, -1, :]  # last tokens in each batch line
        decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]

        # 2 forward runs with custom 4D masks
        part_a = 3  # split point
        input_1a = input_ids_shared_prefix[:, :part_a]
        position_ids_1a = position_ids_shared_prefix[:, :part_a]
        mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]

        outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a)
        past_key_values_a = outs_1a["past_key_values"]

        # Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. [..., seq_len, full_len])
        input_1b = input_ids_shared_prefix[:, part_a:]
        position_ids_1b = position_ids_shared_prefix[:, part_a:]
        mask_1b = mask_shared_prefix[:, :, part_a:, :]
        outs_1b = self.model.forward(
            input_1b, attention_mask=mask_1b, position_ids=position_ids_1b, past_key_values=past_key_values_a
        )
        decoded_1b = [
            self.tokenizer.decode(t)
            for t in outs_1b.logits.argmax(-1)[
                0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
            ]
        ]
        self.assertEqual(decoded, decoded_1b)
| transformers/tests/models/mistral/test_modeling_mistral.py/0 | {
"file_path": "transformers/tests/models/mistral/test_modeling_mistral.py",
"repo_id": "transformers",
"token_count": 10346
} | 567 |
import shutil
import tempfile
import unittest
from transformers import Owlv2Processor
from transformers.testing_utils import require_scipy
from ...test_processing_common import ProcessorTesterMixin
@require_scipy
class Owlv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    """Processor tests for OWLv2, driven entirely by ProcessorTesterMixin."""

    processor_class = Owlv2Processor

    @classmethod
    def setUpClass(cls):
        # Materialize a pretrained processor into a temp dir so the mixin can
        # round-trip it through save_pretrained / from_pretrained.
        cls.tmpdirname = tempfile.mkdtemp()
        processor = cls.processor_class.from_pretrained("google/owlv2-base-patch16-ensemble")
        processor.save_pretrained(cls.tmpdirname)

    @classmethod
    def tearDownClass(cls):
        # ignore_errors: the directory may already be gone on some platforms.
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)
| transformers/tests/models/owlv2/test_processing_owlv2.py/0 | {
"file_path": "transformers/tests/models/owlv2/test_processing_owlv2.py",
"repo_id": "transformers",
"token_count": 243
} | 568 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Pixtral model."""
import unittest
from transformers import (
PixtralVisionConfig,
PixtralVisionModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
class PixtralVisionModelTester:
    """Builds tiny PixtralVisionConfig instances and matching dummy inputs for
    the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.scope = scope
        self.is_training = is_training
        # Geometry of the dummy images.
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Tiny transformer dimensions keep the tests fast.
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        # in Pixtral, the seq length equals the number of patches * batch_size because the patches are flattened
        self.seq_length = (image_size // patch_size) ** 2 * batch_size

    def get_config(self):
        """Return a PixtralVisionConfig mirroring every tester attribute."""
        return PixtralVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, image_sizes) for one dummy batch."""
        config = self.get_config()
        image_shape = [self.batch_size, self.num_channels, self.image_size, self.image_size]
        pixel_values = floats_tensor(image_shape)
        # Every dummy image is square, so each (height, width) row is identical.
        image_sizes = torch.full(
            (self.batch_size, 2), self.image_size, dtype=torch.long, device=torch_device
        )
        return config, pixel_values, image_sizes

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape
        expected by the shared test mixins."""
        config, pixel_values, image_sizes = self.prepare_config_and_inputs()
        return config, {"pixel_values": pixel_values, "image_sizes": image_sizes}
@require_torch
class PixtralVisionModelModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-suite model tests for `PixtralVisionModel`."""

    all_model_classes = (PixtralVisionModel,) if is_torch_available() else ()
    additional_model_inputs = ["image_sizes"]
    # Common-suite features the vision tower does not support.
    test_pruning = False
    test_head_masking = False
    test_torchscript = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = PixtralVisionModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PixtralVisionConfig, has_text_modality=False)

    def test_model_get_set_embeddings(self):
        """Input embeddings must be an nn.Module; output embeddings absent or an nn.Linear."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), torch.nn.Module)
            output_embeddings = model.get_output_embeddings()
            self.assertTrue(output_embeddings is None or isinstance(output_embeddings, torch.nn.Linear))
| transformers/tests/models/pixtral/test_modeling_pixtral.py/0 | {
"file_path": "transformers/tests/models/pixtral/test_modeling_pixtral.py",
"repo_id": "transformers",
"token_count": 1864
} | 569 |
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
import requests
from transformers import BartTokenizer, T5Tokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import (
cleanup,
get_tests_dir,
require_sentencepiece,
require_tokenizers,
require_torch,
require_torch_non_multi_accelerator,
slow,
torch_device,
)
from transformers.utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available
from ..bart.test_modeling_bart import BartModelTester
from ..dpr.test_modeling_dpr import DPRModelTester
from ..t5.test_modeling_t5 import T5ModelTester
TOLERANCE = 1e-3
T5_SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available() and is_datasets_available() and is_faiss_available():
import faiss
import torch
from datasets import Dataset, load_dataset
from transformers import (
AutoConfig,
AutoModel,
AutoModelForSeq2SeqLM,
DPRContextEncoder,
RagConfig,
RagModel,
RagRetriever,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
)
from transformers.modeling_outputs import BaseModelOutput
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b aren't both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def require_retrieval(test_case):
    """
    Decorator marking a test that requires the set of dependencies necessary to perform retrieval with
    [`RagRetriever`] (PyTorch, `datasets` and `faiss`).
    These tests are skipped when the respective libraries are not installed.
    """
    if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
        test_case = unittest.skip(reason="test requires PyTorch, datasets and faiss")(test_case)
    return test_case
@require_torch
@require_retrieval
@require_sentencepiece
class RagTestMixin:
    """Shared test logic for RAG models.

    Subclasses provide a cached `config_and_inputs` property that supplies a
    `RagConfig` together with encoder/decoder inputs; the `test_*` methods here
    exercise the model with and without an attached retriever, with custom
    `n_docs` values, and via `generate`.
    """

    all_model_classes = (
        (RagModel, RagTokenForGeneration, RagSequenceForGeneration)
        if is_torch_available() and is_datasets_available() and is_faiss_available()
        else ()
    )
    # Geometry of the dummy retrieval setup built in `get_retriever`.
    retrieval_vector_size = 32
    n_docs = 3
    max_combined_length = 16

    def setUp(self):
        """Write dummy DPR (WordPiece), BART (BPE) and T5 (sentencepiece) tokenizer files to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()
        # DPR tok (BERT-style WordPiece vocab, one token per line)
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok (byte-level BPE: JSON vocab + merges file; \u0120 is the BPE space marker)
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        # T5 tok (sentencepiece model shipped with the test fixtures)
        t5_tokenizer = T5Tokenizer(T5_SAMPLE_VOCAB)
        t5_tokenizer_path = os.path.join(self.tmpdirname, "t5_tokenizer")
        t5_tokenizer.save_pretrained(t5_tokenizer_path)

    @cached_property
    def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        """Question-encoder tokenizer backed by the dummy DPR vocab written in setUp."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    @cached_property
    def dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        """Context-encoder tokenizer sharing the same dummy DPR vocab."""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    @cached_property
    def bart_tokenizer(self) -> BartTokenizer:
        """Generator tokenizer backed by the dummy BART vocab/merges written in setUp."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    @cached_property
    def t5_tokenizer(self) -> BartTokenizer:
        # NOTE(review): the annotation says BartTokenizer but a T5Tokenizer is returned;
        # kept byte-identical here — annotation looks like a copy-paste slip.
        return T5Tokenizer.from_pretrained(os.path.join(self.tmpdirname, "t5_tokenizer"))

    def tearDown(self):
        """Remove the tokenizer temp dir and release accelerator memory."""
        shutil.rmtree(self.tmpdirname)
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device)

    def get_retriever(self, config):
        """Build a RagRetriever over a 3-document in-memory dataset with a flat inner-product faiss index.

        `load_dataset` inside the retriever is patched so no network/disk access happens.
        """
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1", "3"],
                "text": ["foo", "bar", "qux"],
                "title": ["Foo", "Bar", "Qux"],
                "embeddings": [
                    np.ones(self.retrieval_vector_size),
                    2 * np.ones(self.retrieval_vector_size),
                    3 * np.ones(self.retrieval_vector_size),
                ],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        # Pick the generator-side tokenizer that matches the configured generator.
        tokenizer = self.bart_tokenizer if config.generator.model_type == "bart" else self.t5_tokenizer
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.dpr_tokenizer,
                generator_tokenizer=tokenizer,
            )
        return retriever

    def check_model_with_retriever(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Forward pass with an attached retriever; checks logits/enc-hidden/doc-score shapes."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        for model_class in self.all_model_classes:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            # logits: the decoder batch is replicated once per retrieved doc
            self.assertEqual(
                outputs.logits.shape,
                (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
            )
            # generator encoder last hidden states
            self.assertEqual(
                outputs.generator_enc_last_hidden_state.shape,
                (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
            )
            # doc scores: one score per (question, retrieved doc) pair
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_with_end2end_retriever(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Same shape checks, but with a trainable context encoder attached (end-to-end retrieval)."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        context_encoder_tokenizer = self.dpr_ctx_encoder_tokenizer
        dpr_context_encoder = DPRContextEncoder(config.question_encoder)  # dpr is a twin tower
        retriever = self.get_retriever(config)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)  # setting the ctx_encoder_tokenizer.
        for model_class in [RagTokenForGeneration, RagSequenceForGeneration]:
            model = model_class(config, retriever=retriever)
            model.set_context_encoder_for_training(dpr_context_encoder)  # set the context_encoder for training
            model.to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            # logits
            self.assertEqual(
                outputs.logits.shape,
                (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
            )
            # generator encoder last hidden states
            self.assertEqual(
                outputs.generator_enc_last_hidden_state.shape,
                (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
            )
            # doc scores
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_generate_from_context_input_ids(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Generation path where retrieval is done manually and pre-computed context ids/scores are passed in."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            # Retriever runs on CPU/numpy, hence the detach + float32 round-trip.
            out = retriever(
                input_ids,
                question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(),
                prefix=config.generator.prefix,
                return_tensors="pt",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )
            # cast back onto the model's device/dtype
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            # compute doc_scores as question/doc embedding inner products
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )
            outputs = model.generate(
                context_input_ids=context_input_ids,
                context_attention_mask=context_attention_mask,
                doc_scores=doc_scores,
                do_deduplication=True,
            )
            self.assertIsNotNone(outputs)

    def check_model_generate(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Beam-search generation smoke test for the two *ForGeneration classes (skips plain RagModel)."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        for model_class in self.all_model_classes[1:]:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model.generate(
                input_ids=input_ids,
                num_beams=2,
                num_return_sequences=2,
                decoder_start_token_id=config.generator.eos_token_id,
            )
            self.assertIsNotNone(outputs)

    def check_model_without_retriever(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Forward pass with retrieval done outside the model (context ids and doc scores passed in)."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(
                input_ids,
                question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(),
                prefix=config.generator.prefix,
                return_tensors="pt",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )
            # cast
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            # compute doc_scores
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )
            outputs = model(
                context_input_ids=context_input_ids,
                context_attention_mask=context_attention_mask,
                doc_scores=doc_scores,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            # logits
            self.assertEqual(
                outputs.logits.shape,
                (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
            )
            # generator encoder last hidden states
            self.assertEqual(
                outputs.generator_enc_last_hidden_state.shape,
                (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
            )
            # doc scores
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_custom_n_docs(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs
    ):
        """Same as check_model_without_retriever but overriding n_docs at call time."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(
                input_ids,
                question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(),
                prefix=config.generator.prefix,
                return_tensors="pt",
                n_docs=n_docs,
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )
            # cast
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            # compute doc_scores
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )
            outputs = model(
                context_input_ids=context_input_ids,
                context_attention_mask=context_attention_mask,
                doc_scores=doc_scores,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                n_docs=n_docs,
            )
            # logits: shapes must track the overridden n_docs, not self.n_docs
            self.assertEqual(
                outputs.logits.shape,
                (n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
            )
            # generator encoder last hidden states
            self.assertEqual(
                outputs.generator_enc_last_hidden_state.shape,
                (n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
            )
            # doc scores
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs))

    def check_model_with_mismatch_n_docs_value(
        self,
        config,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        retriever_n_docs,
        generator_n_docs,
        **kwargs,
    ):
        """Retrieving with one n_docs and forwarding with another must raise an AssertionError."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(
                input_ids,
                question_hidden_states.detach().to(device="cpu", dtype=torch.float32).numpy(),
                prefix=config.generator.prefix,
                return_tensors="pt",
                n_docs=retriever_n_docs,
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )
            # cast
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            # compute doc_scores
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )
            self.assertRaises(
                AssertionError,
                model.__call__,
                context_input_ids=context_input_ids,
                context_attention_mask=context_attention_mask,
                doc_scores=doc_scores,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                n_docs=generator_n_docs,
            )

    def check_model_with_encoder_outputs(
        self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs
    ):
        """Re-running the model from pre-computed encoder outputs must reproduce the same shapes."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        for model_class in self.all_model_classes:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            encoder_outputs = BaseModelOutput(outputs.generator_enc_last_hidden_state)
            # run only generator
            outputs = model(
                encoder_outputs=encoder_outputs,
                doc_scores=outputs.doc_scores,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
            )
            # logits
            self.assertEqual(
                outputs.logits.shape,
                (self.n_docs * decoder_input_ids.shape[0], decoder_input_ids.shape[1], config.generator.vocab_size),
            )
            # generator encoder last hidden states
            self.assertEqual(
                outputs.generator_enc_last_hidden_state.shape,
                (self.n_docs * decoder_input_ids.shape[0], self.max_combined_length, config.generator.hidden_size),
            )
            # doc scores
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def test_model_with_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_retriever(**inputs_dict)

    def test_model_with_end2end_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_end2end_retriever(**inputs_dict)

    def test_model_without_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_without_retriever(**inputs_dict)

    def test_model_with_encoder_outputs(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_encoder_outputs(**inputs_dict)

    def test_model_generate(self):
        inputs_dict = self.config_and_inputs
        self.check_model_generate(**inputs_dict)

    def test_model_with_custom_n_docs(self):
        inputs_dict = self.config_and_inputs
        inputs_dict["n_docs"] = 1
        self.check_model_custom_n_docs(**inputs_dict)

    def test_model_with_mismatch_n_docs_value(self):
        inputs_dict = self.config_and_inputs
        inputs_dict["retriever_n_docs"] = 3
        inputs_dict["generator_n_docs"] = 2
        self.check_model_with_mismatch_n_docs_value(**inputs_dict)
@require_torch
@require_retrieval
class RagDPRBartTest(RagTestMixin, unittest.TestCase):
    """RAG test suite instantiated with a DPR question encoder and a BART generator."""

    @cached_property
    def config_and_inputs(self):
        """Assemble a RagConfig plus encoder/decoder inputs from the DPR and BART sub-testers."""
        # Question-encoder side: DPR supplies the config, input ids and attention mask.
        question_encoder_tester = DPRModelTester(self)
        (question_encoder_config, input_ids, _, input_mask, _, _, _) = (
            question_encoder_tester.prepare_config_and_inputs()
        )
        # Generator side: BART supplies the config and the decoder inputs.
        generator_tester = BartModelTester(self)
        generator_config, bart_inputs_dict = generator_tester.prepare_config_and_inputs_for_common()
        decoder_input_ids = bart_inputs_dict["input_ids"]
        decoder_attention_mask = bart_inputs_dict["attention_mask"]
        config = RagConfig.from_question_encoder_generator_configs(
            question_encoder_config,
            generator_config,
            n_docs=self.n_docs,
            retrieval_vector_size=self.retrieval_vector_size,
            max_combined_length=self.max_combined_length,
        )
        return {
            "config": config,
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }
@require_torch
@require_retrieval
class RagDPRT5Test(RagTestMixin, unittest.TestCase):
    """RAG test suite instantiated with a DPR question encoder and a T5 generator."""

    @cached_property
    def config_and_inputs(self):
        """Assemble a RagConfig plus encoder/decoder inputs from the DPR and T5 sub-testers."""
        # Question-encoder side: DPR supplies the config, input ids and attention mask.
        question_encoder_tester = DPRModelTester(self)
        (question_encoder_config, input_ids, _, input_mask, _, _, _) = (
            question_encoder_tester.prepare_config_and_inputs()
        )
        # Generator side: T5 supplies the config and the decoder inputs.
        generator_tester = T5ModelTester(self, vocab_size=1101)
        (generator_config, _, decoder_input_ids, _, decoder_attention_mask, _) = (
            generator_tester.prepare_config_and_inputs()
        )
        config = RagConfig.from_question_encoder_generator_configs(
            question_encoder_config,
            generator_config,
            n_docs=self.n_docs,
            retrieval_vector_size=self.retrieval_vector_size,
            max_combined_length=self.max_combined_length,
        )
        return {
            "config": config,
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }
@require_torch
@require_retrieval
@require_sentencepiece
@require_tokenizers
@require_torch_non_multi_accelerator
class RagModelIntegrationTests(unittest.TestCase):
    """Slow integration tests that run real pretrained RAG checkpoints against a
    downloaded dummy wiki_dpr dataset/index and pin exact model outputs."""

    @classmethod
    def setUpClass(cls):
        """Download the dummy wiki_dpr dataset and its prebuilt faiss index into a shared temp dir."""
        cls.temp_dir = tempfile.TemporaryDirectory()
        cls.dataset_path = cls.temp_dir.name
        cls.index_path = os.path.join(cls.temp_dir.name, "index.faiss")
        ds = load_dataset("hf-internal-testing/wiki_dpr_dummy")["train"]
        ds.save_to_disk(cls.dataset_path)
        url = "https://huggingface.co/datasets/hf-internal-testing/wiki_dpr_dummy/resolve/main/index.faiss"
        response = requests.get(url, stream=True)
        with open(cls.index_path, "wb") as fp:
            fp.write(response.content)

    @classmethod
    def tearDownClass(cls):
        """Delete the shared temp dir created in setUpClass."""
        cls.temp_dir.cleanup()

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device, gc_collect=True)

    @cached_property
    def sequence_model(self):
        """RagSequenceForGeneration assembled from pretrained DPR + BART checkpoints, in eval mode."""
        return (
            RagSequenceForGeneration.from_pretrained_question_encoder_generator(
                "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
            )
            .to(torch_device)
            .eval()
        )

    @cached_property
    def token_model(self):
        """RagTokenForGeneration assembled from pretrained DPR + BART checkpoints, in eval mode."""
        return (
            RagTokenForGeneration.from_pretrained_question_encoder_generator(
                "facebook/dpr-question_encoder-single-nq-base", "facebook/bart-large-cnn"
            )
            .to(torch_device)
            .eval()
        )

    def get_rag_config(self):
        """Build a RagConfig pointing at the locally downloaded dataset and faiss index."""
        question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
        return RagConfig.from_question_encoder_generator_configs(
            question_encoder_config,
            generator_config,
            bos_token_id=0,
            decoder_start_token_id=2,
            eos_token_id=2,
            is_encoder_decoder=True,
            pad_token_id=1,
            vocab_size=50264,
            title_sep=" / ",
            doc_sep=" // ",
            n_docs=5,
            max_combined_length=300,
            dataset="wiki_dpr",
            dataset_split="train",
            index_name="custom",
            passages_path=self.dataset_path,
            index_path=self.index_path,
            use_dummy_dataset=True,
            retrieval_vector_size=768,
            retrieval_batch_size=8,
            dataset_revision="b24a417",
        )

    @slow
    def test_rag_sequence_inference(self):
        """Pin logits shape, doc scores and loss of RagSequenceForGeneration on one question."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_sequence = self.sequence_model
        rag_sequence.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with torch.no_grad():
            output = rag_sequence(
                input_ids,
                labels=decoder_input_ids,
            )
        # n_docs(5) x decoder length(5) x vocab
        expected_shape = torch.Size([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        # Reference values recorded at integration time.
        expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
        _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
        expected_loss = torch.tensor([36.7368]).to(torch_device)
        _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)

    @slow
    def test_rag_token_inference(self):
        """Pin logits shape, doc scores and loss of RagTokenForGeneration on one question."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with torch.no_grad():
            output = rag_token(
                input_ids,
                labels=decoder_input_ids,
            )
        expected_shape = torch.Size([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = torch.tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]]).to(torch_device)
        _assert_tensors_equal(expected_doc_scores, output.doc_scores, atol=TOLERANCE)
        expected_loss = torch.tensor([36.3557]).to(torch_device)
        _assert_tensors_equal(expected_loss, output.loss, atol=TOLERANCE)

    @slow
    def test_rag_token_generate_beam(self):
        """Pin the two beam-search outputs of RagTokenForGeneration for a fixed question."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        input_ids = input_ids.to(torch_device)
        output_ids = rag_token.generate(
            input_ids,
            decoder_start_token_id=rag_token.generator.config.decoder_start_token_id,
            num_beams=2,
            num_return_sequences=2,
        )
        # sequence generate test
        output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
        output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
        # Expected outputs as given by model at integration time.
        EXPECTED_OUTPUT_TEXT_1 = '"She\'s My Kind of Girl" was released through Epic Records in Japan in March 1972. The song was a Top 10 hit in the country. It was the first single to be released by ABBA in the UK. The single was followed by "En Carousel" and "Love Has Its Uses"'
        EXPECTED_OUTPUT_TEXT_2 = '"She\'s My Kind of Girl" was released through Epic Records in Japan in March 1972. The song was a Top 10 hit in the country. It was the first single to be released by ABBA in the UK. The single was followed by "En Carousel" and "Love Has Its Ways"'
        self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
        self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)

    @slow
    def test_rag_sequence_generate_beam(self):
        """Pin the two beam-search outputs of RagSequenceForGeneration for a fixed question."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        rag_sequence = self.sequence_model
        rag_sequence.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        input_ids = input_ids.to(torch_device)
        output_ids = rag_sequence.generate(
            input_ids,
            decoder_start_token_id=rag_sequence.generator.config.decoder_start_token_id,
            num_beams=2,
            num_return_sequences=2,
        )
        # sequence generate test
        output_text_1 = rag_decoder_tokenizer.decode(output_ids[0], skip_special_tokens=True)
        output_text_2 = rag_decoder_tokenizer.decode(output_ids[1], skip_special_tokens=True)
        # Expected outputs as given by model at integration time.
        EXPECTED_OUTPUT_TEXT_1 = """\"She's My Kind of Girl\" was released through Epic Records in Japan in March 1972, giving the duo a Top 10 hit. Two more singles were released in Japan, \"En Carousel\" and \"Love Has Its Ways\" Ulvaeus and Andersson persevered with their songwriting and experimented with new sounds and vocal arrangements."""
        EXPECTED_OUTPUT_TEXT_2 = """In September 2018, Björn Ulvaeus revealed that the two new songs, \"I Still Have Faith In You\" and \"Don't Shut Me Down\", would be released no earlier than March 2019. The two new tracks will feature in a TV special set to air later in the year."""
        self.assertEqual(output_text_1, EXPECTED_OUTPUT_TEXT_1)
        self.assertEqual(output_text_2, EXPECTED_OUTPUT_TEXT_2)

    @property
    def test_data_questions(self):
        """Fixed batch of natural questions shared by the batched generation tests."""
        return [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
        ]

    @slow
    def test_rag_sequence_generate_batch(self):
        """Pin batched greedy answers from the facebook/rag-sequence-nq checkpoint."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        retriever = RagRetriever.from_pretrained(
            "facebook/rag-sequence-nq",
            index_name="custom",
            passages_path=self.dataset_path,
            index_path=self.index_path,
        )
        rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
            torch_device
        )
        input_dict = tokenizer(
            self.test_data_questions,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)
        output_ids = rag_sequence.generate(
            input_ids,
            attention_mask=attention_mask,
        )
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        # PR #31938 cause the output being changed from `june 22, 2018` to `june 22 , 2018`.
        EXPECTED_OUTPUTS = [
            " albert einstein",
            " june 22 , 2018",
            " amplitude modulation",
            " tim besley ( chairman )",
            " june 20 , 2018",
            " 1980",
            " 7.0",
            " 8",
        ]
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    @slow
    def test_rag_sequence_generate_batch_from_context_input_ids(self):
        """Same expectations as the batched test, but retrieval is performed manually outside the model."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        retriever = RagRetriever.from_pretrained(
            "facebook/rag-sequence-nq",
            index_name="custom",
            passages_path=self.dataset_path,
            index_path=self.index_path,
        )
        rag_sequence = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever).to(
            torch_device
        )
        input_dict = tokenizer(
            self.test_data_questions,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)
        question_hidden_states = rag_sequence.question_encoder(input_ids, attention_mask=attention_mask)[0]
        docs_dict = retriever(
            input_ids.detach().cpu().numpy(), question_hidden_states.detach().cpu().numpy(), return_tensors="pt"
        )
        # Doc scores are the inner products between question and retrieved-doc embeddings.
        doc_scores = torch.bmm(
            question_hidden_states.unsqueeze(1),
            docs_dict["retrieved_doc_embeds"].to(torch_device).float().transpose(1, 2),
        ).squeeze(1)
        output_ids = rag_sequence.generate(
            context_input_ids=docs_dict["context_input_ids"].to(torch_device),
            context_attention_mask=docs_dict["context_attention_mask"].to(torch_device),
            doc_scores=doc_scores.to(torch_device),
            do_deduplication=True,
        )
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        EXPECTED_OUTPUTS = [
            " albert einstein",
            " june 22 , 2018",
            " amplitude modulation",
            " tim besley ( chairman )",
            " june 20 , 2018",
            " 1980",
            " 7.0",
            " 8",
        ]
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    @slow
    def test_rag_token_generate_batch(self):
        """Pin batched answers from the facebook/rag-token-nq checkpoint (fp16 on accelerators)."""
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        retriever = RagRetriever.from_pretrained(
            "facebook/rag-token-nq", index_name="custom", passages_path=self.dataset_path, index_path=self.index_path
        )
        rag_token = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever).to(
            torch_device
        )
        if torch_device != "cpu":
            rag_token.half()
        input_dict = tokenizer(
            self.test_data_questions,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )
        input_ids = input_dict.input_ids.to(torch_device)
        attention_mask = input_dict.attention_mask.to(torch_device)
        output_ids = rag_token.generate(
            input_ids,
            attention_mask=attention_mask,
        )
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        EXPECTED_OUTPUTS = [
            " albert einstein",
            " september 22 , 2017",
            " amplitude modulation",
            " stefan persson",
            " april 20 , 2018",
            " the 1970s",
            " 7.1. 2",
            " 13",
        ]
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)
@require_torch
@require_retrieval
class RagModelSaveLoadTests(unittest.TestCase):
    """Checks that RAG models assembled via `from_pretrained_question_encoder_generator`
    survive a save/`from_pretrained` round-trip and yield the same loss as a model built
    from independently instantiated question-encoder and generator modules."""
    @classmethod
    def setUpClass(cls):
        # Materialize a dummy wiki_dpr dataset plus its FAISS index in a temp dir so the
        # retriever can be instantiated with `index_name="custom"` against local files.
        cls.temp_dir = tempfile.TemporaryDirectory()
        cls.dataset_path = cls.temp_dir.name
        cls.index_path = os.path.join(cls.temp_dir.name, "index.faiss")
        ds = load_dataset("hf-internal-testing/wiki_dpr_dummy")["train"]
        ds.save_to_disk(cls.dataset_path)
        url = "https://huggingface.co/datasets/hf-internal-testing/wiki_dpr_dummy/resolve/main/index.faiss"
        response = requests.get(url, stream=True)
        with open(cls.index_path, "wb") as fp:
            fp.write(response.content)
    @classmethod
    def tearDownClass(cls):
        # Remove the dataset/index files created in setUpClass.
        cls.temp_dir.cleanup()
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        cleanup(torch_device, gc_collect=True)
    def get_rag_config(self):
        """Combine DPR question-encoder and BART generator configs into one RagConfig
        pointed at the dummy dataset/index prepared in `setUpClass`."""
        question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        generator_config = AutoConfig.from_pretrained("facebook/bart-large-cnn")
        return RagConfig.from_question_encoder_generator_configs(
            question_encoder_config,
            generator_config,
            bos_token_id=0,
            decoder_start_token_id=2,
            eos_token_id=2,
            is_encoder_decoder=True,
            pad_token_id=1,
            vocab_size=50264,
            title_sep=" / ",
            doc_sep=" // ",
            n_docs=5,
            max_combined_length=300,
            dataset="wiki_dpr",
            dataset_split="train",
            index_name="custom",
            passages_path=self.dataset_path,
            index_path=self.index_path,
            use_dummy_dataset=True,
            retrieval_vector_size=768,
            retrieval_batch_size=8,
            dataset_revision="b24a417",
        )
    @slow
    def test_rag_sequence_from_pretrained(self):
        """RAG-sequence: loss after a save/reload round-trip must match (to 4 decimal
        places) the loss of a model composed from separately loaded sub-models."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            rag_sequence = RagSequenceForGeneration.from_pretrained_question_encoder_generator(
                "facebook/dpr-question_encoder-single-nq-base",
                "facebook/bart-large-cnn",
                retriever=rag_retriever,
                config=rag_config,
            ).to(torch_device)
            # check that the from pretrained methods work
            rag_sequence.save_pretrained(tmp_dirname)
            rag_sequence.from_pretrained(tmp_dirname, retriever=rag_retriever)
            rag_sequence.to(torch_device)
            with torch.no_grad():
                output = rag_sequence(
                    input_ids,
                    labels=decoder_input_ids,
                )
            loss_pretrained = output.loss
            del rag_sequence
        question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
        rag_sequence = RagSequenceForGeneration(
            config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
        )
        rag_sequence.to(torch_device)
        with torch.no_grad():
            output = rag_sequence(
                input_ids,
                labels=decoder_input_ids,
            )
        loss_init = output.loss
        self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
    @slow
    def test_rag_token_from_pretrained(self):
        """Same round-trip check as above for RAG-token, additionally verifying that the
        `question_encoder_max_length`/`generator_max_length` kwargs reach the sub-configs."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            "facebook/dpr-question_encoder-single-nq-base"
        )
        rag_retriever = RagRetriever(
            rag_config,
            question_encoder_tokenizer=rag_question_encoder_tokenizer,
            generator_tokenizer=rag_decoder_tokenizer,
        )
        input_ids = rag_question_encoder_tokenizer(
            "who sings does he love me with reba", return_tensors="pt"
        ).input_ids
        decoder_input_ids = rag_decoder_tokenizer("Linda Davis", return_tensors="pt").input_ids
        input_ids = input_ids.to(torch_device)
        decoder_input_ids = decoder_input_ids.to(torch_device)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            rag_token = RagTokenForGeneration.from_pretrained_question_encoder_generator(
                "facebook/dpr-question_encoder-single-nq-base",
                "facebook/bart-large-cnn",
                retriever=rag_retriever,
                config=rag_config,
                question_encoder_max_length=200,
                generator_max_length=200,
            ).to(torch_device)
            # check that the from pretrained methods work
            rag_token.save_pretrained(tmp_dirname)
            rag_token.from_pretrained(tmp_dirname, retriever=rag_retriever)
            rag_token.to(torch_device)
            self.assertTrue(rag_token.question_encoder.config.max_length == 200)
            self.assertTrue(rag_token.generator.config.max_length == 200)
            with torch.no_grad():
                output = rag_token(
                    input_ids,
                    labels=decoder_input_ids,
                )
            loss_pretrained = output.loss
            del rag_token
        question_encoder = AutoModel.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        generator = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
        rag_token = RagTokenForGeneration(
            config=rag_config, question_encoder=question_encoder, generator=generator, retriever=rag_retriever
        )
        rag_token.to(torch_device)
        with torch.no_grad():
            output = rag_token(
                input_ids,
                labels=decoder_input_ids,
            )
        loss_init = output.loss
        self.assertAlmostEqual(loss_pretrained.item(), loss_init.item(), places=4)
| transformers/tests/models/rag/test_modeling_rag.py/0 | {
"file_path": "transformers/tests/models/rag/test_modeling_rag.py",
"repo_id": "transformers",
"token_count": 22724
} | 570 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from transformers import AutoTokenizer, RobertaConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from transformers.models.roberta.modeling_roberta import (
RobertaEmbeddings,
create_position_ids_from_input_ids,
)
from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
ROBERTA_TINY = "sshleifer/tiny-distilroberta-base"
class RobertaModelTester:
    """Builds a tiny RobertaConfig plus random input tensors, and provides
    `create_and_check_*` helpers that run each Roberta head class and verify the
    shapes of its outputs. Consumed by `RobertaModelTest` via the tester mixins."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the enclosing unittest.TestCase; its assert helpers are used below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """Return a config together with random ids, masks and label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """Return a tiny RobertaConfig built from this tester's hyperparameters."""
        return RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        """Config variant with a larger vocab (300) for pipeline tests.
        # presumably so real tokenizer outputs stay within the vocab — TODO confirm
        """
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        """Like `prepare_config_and_inputs`, but flips the config to decoder mode and adds
        encoder hidden states / attention mask for cross-attention testing."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model with decreasing sets of optional inputs and check output shapes."""
        model = RobertaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the base model as a decoder with cross-attention enabled and check shapes."""
        config.add_cross_attention = True
        model = RobertaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the causal-LM head with labels and check the logits shape."""
        model = RobertaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that decoding with a KV cache matches a full forward pass on the
        concatenated sequence (a random hidden-state slice must agree within 1e-3)."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = RobertaForCausalLM(config=config).to(torch_device).eval()
        # make sure that ids don't start with pad token
        mask = input_ids.ne(config.pad_token_id).long()
        input_ids = input_ids * mask
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        # make sure that ids don't start with pad token
        mask = next_tokens.ne(config.pad_token_id).long()
        next_tokens = next_tokens * mask
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the masked-LM head with labels and check the logits shape."""
        model = RobertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the token-classification head and check per-token logits shape."""
        config.num_labels = self.num_labels
        model = RobertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the multiple-choice head on inputs replicated per choice and check logits shape."""
        config.num_choices = self.num_choices
        model = RobertaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each input `num_choices` times along a new dim-1, as the head expects.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the QA head with start/end positions and check both logit shapes."""
        model = RobertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tester mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class RobertaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shape/behaviour test suite for the Roberta model family, driven by
    `RobertaModelTester` and the shared model/generation/pipeline mixins."""
    all_model_classes = (
        (
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaModel,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": RobertaModel,
            "fill-mask": RobertaForMaskedLM,
            "question-answering": RobertaForQuestionAnswering,
            "text-classification": RobertaForSequenceClassification,
            "text-generation": RobertaForCausalLM,
            "token-classification": RobertaForTokenClassification,
            "zero-shot": RobertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    model_split_percents = [0.5, 0.8, 0.9]
    def setUp(self):
        self.model_tester = RobertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        """Run the base model under each supported position-embedding scheme."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): the loop variable shadows the builtin `type`; consider renaming.
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        """Same as `test_model_as_decoder` but with the attention mask left as None."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_decoder_model_past_with_large_inputs_relative_pos_emb(self):
        """KV-cache equivalence check repeated with relative-key position embeddings."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        config_and_inputs[0].position_embedding_type = "relative_key"
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """Smoke test: the reference checkpoint loads without error."""
        model_name = "FacebookAI/roberta-base"
        model = RobertaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        """This is a regression test for https://github.com/huggingface/transformers/issues/1761
        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is RobertaEmbeddings.padding_idx + 1
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = RobertaEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        """This is a regression test for https://github.com/huggingface/transformers/issues/1761
        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is RobertaEmbeddings.padding_idx + 1
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = RobertaEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@require_torch
class RobertaModelIntegrationTest(TestCasePlus):
    """Slow integration tests comparing real checkpoints against fixed reference values
    (originally extracted from fairseq; see the commented provenance snippets below)."""
    @slow
    def test_inference_masked_lm(self):
        """Check masked-LM logits of FacebookAI/roberta-base on a fixed input slice."""
        model = RobertaForMaskedLM.from_pretrained("FacebookAI/roberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 50265))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
        )
        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
        # roberta.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
    @slow
    def test_inference_no_head(self):
        """Check base-model hidden states of FacebookAI/roberta-base on a fixed input slice."""
        model = RobertaModel.from_pretrained("FacebookAI/roberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.0231, 0.0782, 0.0074], [-0.1854, 0.0540, -0.0175], [0.0548, 0.0799, 0.1687]]]
        )
        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
        # roberta.eval()
        # expected_slice = roberta.extract_features(input_ids)[:, :3, :3].detach()
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
    @slow
    def test_inference_classification_head(self):
        """Check MNLI classification logits of FacebookAI/roberta-large-mnli."""
        model = RobertaForSequenceClassification.from_pretrained("FacebookAI/roberta-large-mnli")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
        # roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
        # roberta.eval()
        # expected_tensor = roberta.predict("mnli", input_ids, return_logits=True).detach()
        torch.testing.assert_close(output, expected_tensor, rtol=1e-4, atol=1e-4)
    @pytest.mark.torch_export_test
    @slow
    def test_export(self):
        """Export roberta-base with torch.export and verify the exported program predicts
        the same top-5 mask fill as eager execution."""
        if not is_torch_greater_or_equal_than_2_4:
            self.skipTest(reason="This test requires torch >= 2.4 to run.")
        roberta_model = "FacebookAI/roberta-base"
        device = "cpu"
        attn_implementation = "sdpa"
        max_length = 512
        tokenizer = AutoTokenizer.from_pretrained(roberta_model)
        inputs = tokenizer(
            "The goal of life is <mask>.",
            return_tensors="pt",
            padding="max_length",
            max_length=max_length,
        )
        model = RobertaForMaskedLM.from_pretrained(
            roberta_model,
            device_map=device,
            attn_implementation=attn_implementation,
            use_cache=True,
        )
        logits = model(**inputs).logits
        # token position 6 is the <mask> in the padded input
        eager_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices)
        self.assertEqual(eager_predicted_mask.split(), ["happiness", "love", "peace", "freedom", "simplicity"])
        exported_program = torch.export.export(
            model,
            args=(inputs["input_ids"],),
            kwargs={"attention_mask": inputs["attention_mask"]},
            strict=True,
        )
        result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
        exported_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices)
        self.assertEqual(eager_predicted_mask, exported_predicted_mask)
| transformers/tests/models/roberta/test_modeling_roberta.py/0 | {
"file_path": "transformers/tests/models/roberta/test_modeling_roberta.py",
"repo_id": "transformers",
"token_count": 11138
} | 571 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Hubert model."""
import math
import unittest
import pytest
from transformers import SEWConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torchcodec, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.models.hubert.modeling_hubert import _compute_mask_indices
class SEWModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=32,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(64, 32, 32),
        conv_stride=(5, 2, 1),
        conv_kernel=(10, 3, 1),
        conv_bias=False,
        num_conv_pos_embeddings=31,
        num_conv_pos_embedding_groups=2,
        squeeze_factor=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout=0.1,
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        vocab_size=32,
        do_stable_layer_norm=False,
        scope=None,
    ):
        """Store hyperparameters for building tiny SEW configs/inputs for testing."""
        # `parent` is the enclosing unittest.TestCase; its assert helpers are used by
        # the check_* methods below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.squeeze_factor = squeeze_factor
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.scope = scope
        # Derive the feature-encoder output length: each conv layer reduces the
        # sequence by (kernel - 1) before dividing by its stride.
        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        # the transformer encoder further downsamples by `squeeze_factor`
        self.encoder_seq_length = self.output_seq_length // self.squeeze_factor
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return SEWConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
squeeze_factor=self.squeeze_factor,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout=self.hidden_dropout,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = SEWModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def check_ctc_loss(self, config, input_values, *args):
model = SEWForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_loss(self, config, input_values, *args):
model = SEWForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = SEWForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class SEWModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model and pipeline tests for the SEW architecture, driven by `SEWModelTester`."""
    all_model_classes = (SEWForCTC, SEWModel, SEWForSequenceClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "audio-classification": SEWForSequenceClassification,
            "automatic-speech-recognition": SEWForCTC,
            "feature-extraction": SEWModel,
        }
        if is_torch_available()
        else {}
    )
    # head pruning / head masking tests are disabled for this architecture
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = SEWModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SEWConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)
    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)
    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
    @unittest.skip(reason="Sew has no inputs_embeds.")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Sew has input_values instead of input_ids.")
    def test_forward_signature(self):
        pass
    @unittest.skip(reason="Sew has no token embeddings.")
    def test_resize_tokens_embeddings(self):
        pass
    @unittest.skip(reason="Sew has no inputs_embeds.")
    def test_model_get_set_embeddings(self):
        pass
    def test_retain_grad_hidden_states_attentions(self):
        """Hidden states and attentions must be able to retain gradients through backward."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        # set layer drop to 0
        model.config.layerdrop = 0.0
        input_values = inputs_dict["input_values"]
        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)
        # CTC targets must be shorter than the logit sequence, hence the -2
        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels
        outputs = model(**inputs_dict)
        output = outputs[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        hidden_states.retain_grad()
        attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)
    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)
    def test_initialization(self):
        """Uniform-initialized parameters must have mean in [-1, 1]; all others, mean 0 or 1 under zero-init."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # parameter names (substrings) that are drawn from a uniform distribution
                uniform_init_parms = [
                    "conv.parametrizations.weight",
                    "conv.weight",
                    "masked_spec_embed",
                    "quantizer.weight_proj.weight",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        """Fill every known weight attribute with 3 so re-initialization is easy to detect."""
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = SEWModel.from_pretrained("asapp/sew-tiny-100k")
        self.assertIsNotNone(model)
@require_torch
class SEWUtilsTest(unittest.TestCase):
    """Unit tests for the SpecAugment mask-index helper."""
    def test_compute_mask_indices(self):
        """With mask_length == 1 each row must mask exactly mask_prob * sequence_length frames."""
        batch_size, sequence_length = 4, 60
        mask_prob, mask_length = 0.5, 1
        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)
        expected = [mask_prob * sequence_length] * batch_size
        self.assertListEqual(mask.sum(axis=-1).tolist(), expected)
    def test_compute_mask_indices_overlap(self):
        """With longer spans, overlaps can only reduce the total masked count below the target."""
        batch_size, sequence_length = 4, 80
        mask_prob, mask_length = 0.5, 4
        mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        mask = torch.from_numpy(mask).to(torch_device)
        # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal
        for row_total in mask.sum(axis=-1):
            self.assertTrue(int(row_total) <= mask_prob * sequence_length)
@require_torch
@require_torchcodec
@slow
class SEWModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing SEW checkpoints against recorded reference outputs."""
    def _load_datasamples(self, num_samples):
        """Return the raw waveform arrays of the first `num_samples` dummy LibriSpeech validation clips."""
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(
            lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
        )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_inference_pretrained_batched(self):
        """Batched forward pass of the pretrained encoder must match the reference hidden states."""
        model = SEWModel.from_pretrained("asapp/sew-tiny-100k").to(torch_device)
        processor = Wav2Vec2FeatureExtractor.from_pretrained("asapp/sew-tiny-100k")
        input_speech = self._load_datasamples(2)
        inputs = processor(input_speech, return_tensors="pt", padding=True)
        input_values = inputs.input_values.to(torch_device)
        with torch.no_grad():
            outputs = model(input_values).last_hidden_state
        # expected outputs taken from the original SEW implementation
        expected_outputs_first = torch.tensor(
            [
                [
                    [0.1509, 0.5372, 0.3061, -0.1694],
                    [-0.1700, 0.5764, 0.2753, -0.1299],
                    [0.1281, 0.7949, 0.2342, -0.1624],
                    [-0.1627, 0.6710, 0.2215, -0.1317],
                ],
                [
                    [0.0408, 1.4355, 0.8605, -0.0968],
                    [0.0393, 1.2368, 0.6826, 0.0364],
                    [-0.1269, 1.9215, 1.1677, -0.1297],
                    [-0.1654, 1.6524, 0.6877, -0.0196],
                ],
            ],
            device=torch_device,
        )
        expected_outputs_last = torch.tensor(
            [
                [
                    [1.3379, -0.1450, -0.1500, -0.0515],
                    [0.8364, -0.1680, -0.1248, -0.0689],
                    [1.2791, -0.1507, -0.1523, -0.0564],
                    [0.8208, -0.1690, -0.1199, -0.0751],
                ],
                [
                    [0.6959, -0.0861, -0.1235, -0.0861],
                    [0.4700, -0.1686, -0.1141, -0.1199],
                    [1.0776, -0.1137, -0.0124, -0.0472],
                    [0.5774, -0.1675, -0.0376, -0.0823],
                ],
            ],
            device=torch_device,
        )
        expected_output_sum = 62146.7422
        # compare the leading and trailing 4x4 corners of the hidden states, then the global sum
        torch.testing.assert_close(outputs[:, :4, :4], expected_outputs_first, rtol=5e-3, atol=5e-3)
        torch.testing.assert_close(outputs[:, -4:, -4:], expected_outputs_last, rtol=5e-3, atol=5e-3)
        self.assertTrue(abs(outputs.sum() - expected_output_sum) < 5)
    def test_inference_ctc_batched(self):
        """Greedy CTC decoding of the fine-tuned checkpoint must reproduce the reference transcripts."""
        model = SEWForCTC.from_pretrained("asapp/sew-tiny-100k-ft-ls100h").to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("asapp/sew-tiny-100k-ft-ls100h", do_lower_case=True)
        input_speech = self._load_datasamples(2)
        inputs = processor(input_speech, return_tensors="pt", padding=True)
        input_values = inputs.input_values.to(torch_device)
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)
        # NOTE(review): the misspellings below appear to be the checkpoint's actual greedy
        # output rather than typos in the test — confirm before "fixing" them
        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "swet covered brian's body trickling into the tightloine closs hat was the only garment he wore",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
| transformers/tests/models/sew/test_modeling_sew.py/0 | {
"file_path": "transformers/tests/models/sew/test_modeling_sew.py",
"repo_id": "transformers",
"token_count": 9711
} | 572 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch UniSpeech model."""
import math
import unittest
import numpy as np
import pytest
from datasets import load_dataset
from transformers import UniSpeechConfig, is_torch_available
from transformers.testing_utils import is_flaky, require_torch, require_torchcodec, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
class UniSpeechModelTester:
    """Builds tiny UniSpeech configs and random speech inputs, and provides checks shared by the suite."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=2,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,  # this is most likely not correctly set yet
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        vocab_size=32,
        do_stable_layer_norm=False,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.scope = scope
        # each conv layer shortens the sequence: L_out = (L_in - (kernel - 1)) / stride
        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
    def prepare_config_and_inputs(self):
        """Return (config, random float waveform batch, random attention mask)."""
        input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        return config, input_values, attention_mask
    def get_config(self):
        """Build a tiny `UniSpeechConfig` from the tester's attributes."""
        return UniSpeechConfig(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
        )
    def create_and_check_model(self, config, input_values, attention_mask):
        """Forward the bare encoder and check the last hidden state shape."""
        model = UniSpeechModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )
    def create_and_check_batch_inference(self, config, input_values, *args):
        """Padded batched inference must match per-sample inference on the unpadded slices."""
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = UniSpeechModel(config=config)
        model.to(torch_device)
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0
        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state
            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
    def check_ctc_loss(self, config, input_values, *args):
        """Check that the CTC loss is a float for both `sum` and `mean` reductions."""
        model = UniSpeechForCTC(config=config)
        model.to(torch_device)
        # make sure that dropout is disabled
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0
        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        self.parent.assertTrue(isinstance(sum_loss, float))
        self.parent.assertTrue(isinstance(mean_loss, float))
    def check_seq_classifier_loss(self, config, input_values, *args):
        """Classification loss must be a float and must change when padding is masked."""
        model = UniSpeechForSequenceClassification(config=config)
        model.to(torch_device)
        # make sure that dropout is disabled
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0
        masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
        unmasked_loss = model(input_values, labels=labels).loss.item()
        self.parent.assertTrue(isinstance(masked_loss, float))
        self.parent.assertTrue(isinstance(unmasked_loss, float))
        self.parent.assertTrue(masked_loss != unmasked_loss)
    def check_ctc_training(self, config, input_values, *args):
        """One CTC training step with a frozen feature encoder: loss finite, backward works."""
        config.ctc_zero_infinity = True
        model = UniSpeechForCTC(config=config)
        model.to(torch_device)
        model.train()
        # freeze feature encoder
        model.freeze_feature_encoder()
        input_values = input_values[:3]
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lengths are at least
                # one shorter than logit lengths to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100
        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())
        loss.backward()
    def check_seq_classifier_training(self, config, input_values, *args):
        """One classification training step with a frozen base model: loss finite, backward works."""
        config.ctc_zero_infinity = True
        model = UniSpeechForSequenceClassification(config=config)
        model.to(torch_device)
        model.train()
        # freeze everything but the classification head
        model.freeze_base_model()
        input_values = input_values[:3]
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())
        loss.backward()
    def check_labels_out_of_vocab(self, config, input_values, *args):
        """Labels outside the vocabulary must raise a ValueError."""
        model = UniSpeechForCTC(config)
        model.to(torch_device)
        model.train()
        input_values = input_values[:3]
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
        with pytest.raises(ValueError):
            model(input_values, labels=labels)
    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` to the (config, inputs_dict) common-test interface."""
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model and pipeline tests for UniSpeech in its robust (stable layer norm) setup."""
    all_model_classes = (
        (UniSpeechForCTC, UniSpeechModel, UniSpeechForSequenceClassification, UniSpeechForPreTraining)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "audio-classification": UniSpeechForSequenceClassification,
            "automatic-speech-recognition": UniSpeechForCTC,
            "feature-extraction": UniSpeechModel,
        }
        if is_torch_available()
        else {}
    )
    # head pruning / head masking tests are disabled for this architecture
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        # "robust" variant: layer-norm feature extraction and stable layer norm
        self.model_tester = UniSpeechModelTester(
            self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
        )
        self.config_tester = ConfigTester(self, config_class=UniSpeechConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @is_flaky(
        description="The `codevector_idx` computed with `argmax()` in `UniSpeechGumbelVectorQuantizer.forward` is not stable."
    )
    def test_batching_equivalence(self):
        super().test_batching_equivalence()
    def test_batched_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_batch_inference(*config_and_inputs)
    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)
    def test_seq_classifier_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_loss(*config_and_inputs)
    def test_ctc_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_training(*config_and_inputs)
    def test_seq_classifier_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_seq_classifier_training(*config_and_inputs)
    def test_labels_out_of_vocab(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
    # UniSpeech has no inputs_embeds
    @unittest.skip(reason="UniSpeech has no inputs_embeds")
    def test_inputs_embeds(self):
        pass
    # `input_ids` is renamed to `input_values`
    @unittest.skip(reason="UniSpeech has no inputs_embeds")
    def test_forward_signature(self):
        pass
    # UniSpeech cannot resize token embeddings
    # since it has no tokens embeddings
    @unittest.skip(reason="UniSpeech has no tokens embeds")
    def test_resize_tokens_embeddings(self):
        pass
    @unittest.skip(reason="UniSpeech has no inputs_embeds")
    def test_model_get_set_embeddings(self):
        pass
    def test_retain_grad_hidden_states_attentions(self):
        """Hidden states and attentions must be able to retain gradients through backward."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # force eager attention to support output attentions
        config._attn_implementation = "eager"
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        # set layer drop to 0
        model.config.layerdrop = 0.0
        input_values = inputs_dict["input_values"]
        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)
        # CTC targets must be shorter than the logit sequence, hence the -2
        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels
        outputs = model(**inputs_dict)
        output = outputs[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        hidden_states.retain_grad()
        attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
    def test_initialization(self):
        """Uniform-initialized parameters must have mean in [-1, 1]; all others, mean 0 or 1 under zero-init."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # parameter names (substrings) that are drawn from a uniform distribution
                uniform_init_parms = [
                    "conv.weight",
                    "conv.parametrizations.weight",
                    "masked_spec_embed",
                    "codevectors",
                    "quantizer.weight_proj.weight",
                    "project_hid.weight",
                    "project_hid.bias",
                    "project_q.weight",
                    "project_q.bias",
                    "feature_projection.projection.weight",
                    "feature_projection.projection.bias",
                ]
                if param.requires_grad:
                    if any(x in name for x in uniform_init_parms):
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        """Fill every known weight attribute with 3 so re-initialization is easy to detect."""
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "weight_v") and module.weight_v is not None:
            module.weight_v.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)
        if hasattr(module, "codevectors") and module.codevectors is not None:
            module.codevectors.data.fill_(3)
        if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
            module.masked_spec_embed.data.fill_(3)
    def test_mask_feature_prob_ctc(self):
        """Feature masking during training must not change the logits shape."""
        model = UniSpeechForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech", mask_feature_prob=0.2, mask_feature_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True
        )
        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )
        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits
        self.assertEqual(logits.shape, (4, 1498, 32))
    def test_mask_time_prob_ctc(self):
        """Time masking during training must not change the logits shape."""
        model = UniSpeechForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech", mask_time_prob=0.2, mask_time_length=2
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True
        )
        batch_duration_in_seconds = [1, 3, 2, 6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )
        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits
        self.assertEqual(logits.shape, (4, 1498, 32))
    def test_mask_time_feature_prob_ctc_single_batch(self):
        """Combined time and feature masking must work for a batch of one."""
        model = UniSpeechForCTC.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech",
            mask_time_prob=0.2,
            mask_feature_prob=0.2,
            mask_time_length=2,
            mask_feature_length=2,
        )
        model.to(torch_device).train()
        processor = Wav2Vec2Processor.from_pretrained(
            "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True
        )
        batch_duration_in_seconds = [6]
        input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds]
        batch = processor(
            input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt"
        )
        logits = model(
            input_values=batch["input_values"].to(torch_device),
            attention_mask=batch["attention_mask"].to(torch_device),
        ).logits
        self.assertEqual(logits.shape, (1, 1498, 32))
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = UniSpeechModel.from_pretrained("microsoft/unispeech-large-1500h-cv")
        self.assertIsNotNone(model)
@require_torch
@require_torchcodec
@slow
class UniSpeechModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing a pretrained UniSpeech checkpoint against reference values."""
    def _load_datasamples(self, num_samples):
        """Return the raw waveform arrays of the first `num_samples` dummy LibriSpeech validation clips."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").filter(
            lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
        )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def _load_superb(self, task, num_samples):
        """Return the first `num_samples` rows of the dummy SUPERB dataset for `task`."""
        ds = load_dataset("anton-l/superb_dummy", task, split="test")
        return ds[:num_samples]
    def test_inference_pretraining(self):
        """Pretrained model must show high cosine similarity between projected and quantized states."""
        model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
        model.to(torch_device)
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
        input_speech = self._load_datasamples(2)
        inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)
        with torch.no_grad():
            # seed fixed because the Gumbel quantizer samples during the forward pass
            torch.manual_seed(0)
            outputs = model(
                inputs_dict.input_values.to(torch_device),
                attention_mask=inputs_dict.attention_mask.to(torch_device),
            )
        # compute cosine similarity
        cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)
        # pretrained model should have learned a high cosine similarity
        self.assertTrue(cosine_sim.mean() > 0.5)
        # fmt: off
        expected_cosine_sim_slice = torch.tensor(
            [[0.8290, 0.8335, 0.8815, 0.8580, 0.8249],
            [0.8892, 0.9221, 0.8711, 0.8601, 0.8482]],
            device=torch_device,
        )
        # fmt: on
        torch.testing.assert_close(cosine_sim[:, :5], expected_cosine_sim_slice, rtol=1e-3, atol=1e-3)
| transformers/tests/models/unispeech/test_modeling_unispeech.py/0 | {
"file_path": "transformers/tests/models/unispeech/test_modeling_unispeech.py",
"repo_id": "transformers",
"token_count": 10615
} | 573 |
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ViLT model."""
import unittest
from datasets import load_dataset
from packaging import version
from transformers import ViltConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltForTokenClassification,
ViltModel,
)
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
if is_vision_available():
import PIL
from PIL import Image
from transformers import ViltProcessor
class ViltModelTester:
    """Builds tiny ViLT configs and random text+image inputs for the common model tests.

    When ``add_multiple_images`` is True the pixel values get an extra image
    dimension of size 2 (the NLVR2-style two-image setup).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        modality_type_vocab_size=2,
        add_multiple_images=False,
        num_images=-1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.modality_type_vocab_size = modality_type_vocab_size
        self.add_multiple_images = add_multiple_images
        self.num_images = num_images
        # we set the expected sequence length (which is used in several tests)
        # this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token
        self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels).

        Optional pieces (mask, token type ids, labels) are None when disabled.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        if self.add_multiple_images:
            pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
        else:
            pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        # Bug fix: previously `token_labels` was only bound inside the `if` below,
        # so the return statement raised NameError when use_labels=False.
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels)

    def get_config(self):
        """Build a tiny ViltConfig from the tester's hyperparameters."""
        return ViltConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_labels=self.num_labels,
            modality_type_vocab_size=self.modality_type_vocab_size,
            num_images=self.num_images,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        pixel_values,
        token_labels,
    ):
        """Run the base model with progressively fewer optional inputs and check the output shape."""
        model = ViltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)
        result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)
        result = model(input_ids, pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        pixel_values,
        token_labels,
    ):
        """Run the token-classification head and check the logits shape (text tokens only)."""
        model = ViltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)
        result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)
        result = model(input_ids, pixel_values=pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            pixel_values,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
            "pixel_values": pixel_values,
        }
        return config, inputs_dict

    def prepare_pixel_values(self):
        """Random single-image pixel values (no extra image dimension)."""
        return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
@require_torch
class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the single-image ViLT heads.

    Several common tests are skipped or overridden because ViLT samples image
    tokens from a multinomial distribution, making hidden states non-deterministic.
    """

    all_model_classes = (
        (
            ViltModel,
            ViltForQuestionAnswering,
            ViltForImageAndTextRetrieval,
            ViltForMaskedLM,
            ViltForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    model_split_percents = [0.5, 0.8, 0.9]

    # ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "ViltForQuestionAnswering":
                # VQA uses soft multi-label targets: one score per answer class
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device
                )
            elif model_class.__name__ in ["ViltForMaskedLM", "ViltForTokenClassification"]:
                # per-token integer labels
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
            elif model_class.__name__ == "ViltForImagesAndTextClassification":
                # one integer label per example
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = ViltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_training(self):
        """Check that every trainable head produces a loss that backpropagates."""
        if not self.model_tester.is_training:
            self.skipTest(reason="model_tester.is_training is set to False.")
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                config.modality_type_vocab_size = 3
            # ViltForImageAndTextRetrieval doesn't support training for now
            if model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            # (removed a leftover debug loop that printed every input tensor's shape)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        """Same as test_training, but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            self.skipTest(reason="model_tester.is_training is set to False.")
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            # ViltForImageAndTextRetrieval doesn't support training for now
            if (
                model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(
        reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
        hidden states"""
    )
    def test_save_load(self):
        pass

    @unittest.skip(
        reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
        hidden states"""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        "VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states"
    )
    def test_batching_equivalence(self):
        pass

    @unittest.skip(
        reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
        hidden states"""
    )
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip(
        reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
        hidden states. Cannot test equivalence on logit level"""
    )
    def test_inputs_embeds_matches_input_ids(self):
        pass

    def test_attention_outputs(self):
        """Overridden: ViltForImagesAndTextClassification returns one attention tuple per image."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "expected_seq_len", None)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                # attentions are a list of length num_images
                # each element contains the attentions of a particular image index
                self.assertEqual(len(attentions), self.model_tester.num_images)
                self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
            else:
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                # attentions are a list of length num_images
                # each element contains the attentions of a particular image index
                self.assertEqual(len(attentions), self.model_tester.num_images)
                self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
            else:
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                self.assertListEqual(
                    list(attentions[0][0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                self.assertEqual(len(self_attentions), self.model_tester.num_images)
                self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0][0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
            else:
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        """Overridden: ViltForImagesAndTextClassification returns hidden states per image."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                # hidden_states are a list of length num_images
                # each element contains the hidden states of a particular image index
                self.assertEqual(len(hidden_states), self.model_tester.num_images)
                self.assertEqual(len(hidden_states[0]), expected_num_layers)
            else:
                self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.expected_seq_len
            if model_class.__name__ == "ViltForImagesAndTextClassification":
                self.assertListEqual(
                    list(hidden_states[0][0].shape[-2:]),
                    [seq_length, self.model_tester.hidden_size],
                )
            else:
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # (removed a leftover debug print of the model class)
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        """Check that gradients flow back to the first layer's hidden states and attentions."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        if model_class.__name__ == "ViltForImagesAndTextClassification":
            # hidden_states are a list of length num_images
            # each element contains the hidden states of a particular image index
            hidden_states[0].retain_grad()
            attentions[0].retain_grad()
        else:
            hidden_states.retain_grad()
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        if model_class.__name__ == "ViltForImagesAndTextClassification":
            # hidden_states are a list of length num_images
            # each element contains the hidden states of a particular image index
            self.assertIsNotNone(hidden_states[0].grad)
            self.assertIsNotNone(attentions[0].grad)
        else:
            self.assertIsNotNone(hidden_states.grad)
            self.assertIsNotNone(attentions.grad)

    @slow
    def test_model_from_pretrained(self):
        model_name = "dandelin/vilt-b32-mlm"
        model = ViltModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
@require_torch
class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase):
    """Re-runs the inherited ViLT suite against the multi-image classification head only."""
    all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()
    def setUp(self):
        # add_multiple_images=True / num_images=2 gives pixel_values an extra image
        # dimension; modality_type_vocab_size=3 matches the two-image setup.
        self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
        self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
    @unittest.skip(reason="We only test the model that takes in multiple images")
    def test_model(self):
        pass
    @unittest.skip(reason="We only test the model that takes in multiple images")
    def test_for_token_classification(self):
        pass
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the ViLT integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
class ViltModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released ViLT checkpoints (network required)."""
    @cached_property
    def default_processor(self):
        # Lazily construct the VQA processor; None when vision deps are unavailable.
        return ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") if is_vision_available() else None
    @slow
    def test_inference_masked_lm(self):
        """Masked-LM head: verify a pinned logits slice and that the first [MASK] decodes to 'cats'."""
        model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(torch_device)
        processor = self.default_processor
        image = prepare_img()
        text = "a bunch of [MASK] laying on a [MASK]."
        inputs = processor(image, text, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 11, 30522])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
        # verify masked token prediction equals "cats"
        # NOTE(review): position 4 presumably corresponds to the first [MASK] after tokenization — confirm.
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert processor.decode([predicted_id]) == "cats"
    @slow
    def test_inference_visual_question_answering(self):
        """VQA head: verify a pinned logits slice and that soft labels yield a positive loss."""
        model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(torch_device)
        processor = self.default_processor
        image = prepare_img()
        text = "How many cats are there?"
        inputs = processor(image, text, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 3129))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
        # compute loss
        # soft VQA targets: answer-class indices with associated scores
        vqa_labels = [[2, 3, 155, 800]]
        vqa_scores = [[1.0, 0.3, 0.3, 0.3]]
        labels = torch.zeros(1, model.config.num_labels).to(torch_device)
        for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)):
            for l, s in zip(labels_example, scores_example):
                labels[i, l] = s
        # forward pass
        outputs = model(**inputs, labels=labels)
        # verify we have a positive loss
        self.assertTrue(outputs.loss > 0)
    @slow
    def test_inference_natural_language_visual_reasoning(self):
        """NLVR2 head: run the two-image checkpoint on a fixture pair and pin the logits."""
        model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2").to(
            torch_device
        )
        processor = self.default_processor
        dataset = load_dataset("hf-internal-testing/fixtures_nlvr2", split="train")
        image1 = dataset[0]["image"]
        image2 = dataset[1]["image"]
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        # stack the two images along a new dim=1 "image index" axis
        pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1)
        # forward pass
        outputs = model(
            input_ids=encoding_1.input_ids.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size([1, 2])
        self.assertEqual(outputs.logits.shape, expected_shape)
        # expectations differ by Pillow version because image resampling changed in Pillow 9
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [-2.4013, 2.9342],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [-2.3713, 2.9168],
                device=torch_device,
            )
        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
| transformers/tests/models/vilt/test_modeling_vilt.py/0 | {
"file_path": "transformers/tests/models/vilt/test_modeling_vilt.py",
"repo_id": "transformers",
"token_count": 12166
} | 574 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ViViT model."""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VivitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel
if is_vision_available():
from transformers import VivitImageProcessor
class VivitModelTester:
    """Builds a tiny ViViT config plus random video inputs for the common model tests."""
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_labels=True,
        num_labels=10,
        image_size=10,
        num_frames=8,  # decreased, because default 32 takes too much RAM at inference
        tubelet_size=[2, 4, 4],
        num_channels=3,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        scope=None,
        attn_implementation="eager",
        mask_ratio=0.5,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.scope = scope
        self.attn_implementation = attn_implementation
        # number of tubelet tokens (spatial patches x temporal chunks) + 1 CLS token;
        # tubelet_size is [time, height, width]
        self.seq_length = (
            (self.image_size // self.tubelet_size[2])
            * (self.image_size // self.tubelet_size[1])
            * (self.num_frames // self.tubelet_size[0])
        ) + 1  # CLS token
        self.mask_ratio = mask_ratio
        # NOTE(review): num_masks appears unused by the tests visible in this file — confirm before removing.
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels=False."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """Build a tiny VivitConfig from the tester's hyperparameters."""
        config = VivitConfig(
            num_frames=self.num_frames,
            image_size=self.image_size,
            tubelet_size=self.tubelet_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            qkv_bias=self.qkv_bias,
            attn_implementation=self.attn_implementation,
        )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base model and check the last hidden state shape."""
        model = VivitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        """Run the video-classification head and check the logits shape."""
        model = VivitForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """
    all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torch_exportable = True
    def setUp(self):
        self.model_tester = VivitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # deepcopy so label injection never leaks back into the shared inputs dict
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="Vivit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_get_set_embeddings(self):
        # input embeddings are the patch/tubelet embeddings; no output embeddings expected
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """Forward must accept pixel_values and head_mask as its first two arguments."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values", "head_mask"]
            self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model_name = "google/vivit-b-16x2-kinetics400"
        model = VivitModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        """Overridden: attention shapes are checked against the video token sequence length."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            seq_len = self.model_tester.seq_length
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        """Overridden: hidden-state shapes are checked against the video token sequence length."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
    """Download the 32-frame spaghetti-eating fixture clip and return it as a list of frames."""
    local_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
    )
    return list(np.load(local_path))
@require_torch
@require_vision
class VivitModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released ViViT Kinetics-400 checkpoint (network required)."""
    @cached_property
    def default_image_processor(self):
        # Default processor with stock settings; None when vision deps are unavailable.
        return VivitImageProcessor() if is_vision_available() else None
    @slow
    def test_inference_for_video_classification(self):
        """Classify the spaghetti clip and pin the first five logits (device-dependent expectations)."""
        model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        # (None, None) is the generic fallback; ("cuda", 8) covers compute-capability-8 GPUs
        expectations = Expectations(
            {
                (None, None): [-0.9498, 2.7971, -1.4049, 0.1024, -1.8353],
                ("cuda", 8): [-0.9498, 2.7971, -1.4049, 0.1025, -1.8353],
            }
        )
        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=2e-4, atol=2e-4)
    @slow
    def test_inference_interpolate_pos_encoding(self):
        """Run the base model at a non-default resolution with interpolated position embeddings."""
        # Vivit models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)
        image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        video = prepare_video()
        inputs = image_processor(
            video, size={"shortest_edge": 480}, crop_size={"height": 232, "width": 232}, return_tensors="pt"
        )
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits shape
        expected_shape = torch.Size((1, 3137, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
"file_path": "transformers/tests/models/vivit/test_modeling_vivit.py",
"repo_id": "transformers",
"token_count": 6496
} | 575 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Wav2Vec2Phoneme tokenizer."""
import json
import os
import unittest
from functools import lru_cache
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the Wav2Vec2 phoneme CTC tokenizer.

    The tokenizer phonemizes raw text and maps phonemes to ids; decoding
    filters pad tokens and (optionally) word-delimiter tokens.
    """

    from_pretrained_id = "facebook/wav2vec2-lv-60-espeak-cv-ft"
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    # No Rust/fast implementation exists for this tokenizer.
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        # Write a local phoneme vocab file so tests can instantiate a tokenizer
        # from cls.tmpdirname without downloading a checkpoint.
        super().setUpClass()
        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        cls.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> tuple[str, list]:
        """Build a (text, ids) pair that encodes/decodes consistently without phonemization."""
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        # keep only ids whose decoded text re-encodes to exactly that single id
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            # repeat the token list until the minimum length is reached
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            # force at least one space so the text splits into multiple tokens
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        # Cached: identical (pretrained_name, kwargs) combinations reuse one instance.
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(pretrained_name, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        # encoding raw text must equal encoding its phonemized form
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        # encode -> decode round-trips back to the phoneme string
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        # single decode must match the corresponding batch_decode entry
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        # filtered decode must equal the phoneme string with delimiters stripped
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"
        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        # different phonemizer languages yield different id sequences
        self.assertNotEqual(input_ids_en, input_ids_fr)
        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})
        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ ! ? ! ? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        # Extract one field from a list of offset dicts.
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip(reason="Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip(reason="Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip(
        reason="encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency"
    )
    def test_internal_consistency(self):
        pass

    @unittest.skip(reason="Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_add_tokens_tokenizer(self):
        # NOTE: body retained from the common test but never executed (skipped above).
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
| transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py/0 | {
"file_path": "transformers/tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py",
"repo_id": "transformers",
"token_count": 10074
} | 576 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece model shipped with the test fixtures.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XGLM (slow SentencePiece and fast Rust implementations)."""

    from_pretrained_id = "facebook/xglm-564M"
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(cls.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

    def test_full_tokenizer(self):
        # tokenization, token->id, and id->token round-trips on the fixture vocab
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        # tokens mapped to <unk> (id 2 before offset) decode back as "<unk>"
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Full pretrained tokenizer (downloaded once, then cached on the instance).
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        # tokenizer must survive pickling even after its vocab file is deleted
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        # slow and fast tokenizers must agree on tokens and ids
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exist and be tokenized to unk, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: skip
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        } # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| transformers/tests/models/xglm/test_tokenization_xglm.py/0 | {
"file_path": "transformers/tests/models/xglm/test_tokenization_xglm.py",
"repo_id": "transformers",
"token_count": 4240
} | 577 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datasets
import numpy as np
from huggingface_hub import AudioClassificationOutputElement
from transformers import (
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
is_torch_available,
)
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
compare_pipeline_output_to_hub_spec,
is_pipeline_test,
nested_simplify,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
cls._dataset = datasets.load_dataset(
"hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
audio_classifier = AudioClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return audio_classifier, [audio2, audio]
def run_pipeline_test(self, audio_classifier, examples):
audio2, audio = examples
output = audio_classifier(audio)
# by default a model is initialized with num_labels=2
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
output = audio_classifier(audio, top_k=1)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
],
)
self.run_torchaudio(audio_classifier)
for single_output in output:
compare_pipeline_output_to_hub_spec(single_output, AudioClassificationOutputElement)
@require_torchaudio
def run_torchaudio(self, audio_classifier):
self._load_dataset()
# test with a local file
audio = self._dataset[0]["audio"]["array"]
output = audio_classifier(audio)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
@require_torch
def test_small_model_pt(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
EXPECTED_OUTPUT = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
EXPECTED_OUTPUT_PT_2 = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
output = audio_classifier(audio_dict, top_k=4)
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
def test_small_model_pt_fp16(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model, dtype=torch.float16)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
# Expected outputs are collected running the test on torch 2.6 in few scenarios.
# Running on CUDA T4/A100 and on XPU PVC (note: using stock torch xpu, NOT using IPEX):
EXPECTED_OUTPUT = [
{"score": 0.0833, "label": "go"},
{"score": 0.0833, "label": "off"},
{"score": 0.0833, "label": "stop"},
{"score": 0.0833, "label": "on"},
]
# Running on CPU:
EXPECTED_OUTPUT_PT_2 = [
{"score": 0.0839, "label": "no"},
{"score": 0.0837, "label": "go"},
{"score": 0.0836, "label": "yes"},
{"score": 0.0835, "label": "right"},
]
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
output = audio_classifier(audio_dict, top_k=4)
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def test_large_model_pt(self):
model = "superb/wav2vec2-base-superb-ks"
audio_classifier = pipeline("audio-classification", model=model)
dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
audio = np.array(dataset[3]["speech"], dtype=np.float32)
output = audio_classifier(audio, top_k=4)
self.assertEqual(
nested_simplify(output, decimals=3),
[
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
],
)
@require_torch
@slow
def test_top_k_none_returns_all_labels(self):
model_name = "superb/wav2vec2-base-superb-ks" # model with more than 5 labels
classification_pipeline = pipeline(
"audio-classification",
model=model_name,
top_k=None,
)
# Create dummy input
sampling_rate = 16000
signal = np.zeros((sampling_rate,), dtype=np.float32)
result = classification_pipeline(signal)
num_labels = classification_pipeline.model.config.num_labels
self.assertEqual(len(result), num_labels, "Should return all labels when top_k is None")
@require_torch
@slow
def test_top_k_none_with_few_labels(self):
    """``top_k=None`` must also return every label for a model with a small label set."""
    checkpoint = "superb/hubert-base-superb-er"  # model with fewer labels
    classifier = pipeline(
        "audio-classification",
        model=checkpoint,
        top_k=None,
    )
    # One second of silence at 16 kHz as a dummy waveform.
    silence = np.zeros((16000,), dtype=np.float32)
    predictions = classifier(silence)
    label_count = classifier.model.config.num_labels
    self.assertEqual(len(predictions), label_count, "Should handle models with fewer labels correctly")
@require_torch
@slow
def test_top_k_greater_than_labels(self):
    """A ``top_k`` larger than the label count must be capped to the label count."""
    checkpoint = "superb/hubert-base-superb-er"
    classifier = pipeline(
        "audio-classification",
        model=checkpoint,
        top_k=100,  # intentionally large number
    )
    # One second of silence at 16 kHz as a dummy waveform.
    silence = np.zeros((16000,), dtype=np.float32)
    predictions = classifier(silence)
    label_count = classifier.model.config.num_labels
    self.assertEqual(len(predictions), label_count, "Should cap top_k to number of labels")
| transformers/tests/pipelines/test_pipelines_audio_classification.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_audio_classification.py",
"repo_id": "transformers",
"token_count": 3940
} | 578 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
SummarizationPipeline,
TFPreTrainedModel,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_torch, slow, torch_device
from transformers.tokenization_utils import TruncationStrategy
from .test_pipelines_common import ANY
@is_pipeline_test
class SummarizationPipelineTests(unittest.TestCase):
    """Pipeline tests for the summarization task.

    `model_mapping` / `tf_model_mapping` drive the generic per-architecture tests via
    `get_test_pipeline` and `run_pipeline_test`; the remaining methods are standalone
    checkpoint-specific tests.
    """

    # Architecture mappings used by the shared pipeline test harness.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(
        self,
        model,
        tokenizer=None,
        image_processor=None,
        feature_extractor=None,
        processor=None,
        dtype="float32",
    ):
        """Build a `SummarizationPipeline` for the harness plus sample inputs to run it on."""
        summarizer = SummarizationPipeline(
            model=model,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            image_processor=image_processor,
            processor=processor,
            dtype=dtype,
            max_new_tokens=20,
        )
        return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"]

    def run_pipeline_test(self, summarizer, _):
        """Smoke-test the pipeline output shape, generation kwargs, and long-input handling."""
        model = summarizer.model
        # Single input -> list with one {"summary_text": str} dict.
        outputs = summarizer("(CNN)The Palestinian Authority officially became")
        self.assertEqual(outputs, [{"summary_text": ANY(str)}])
        # Generation kwargs (beams / length bounds) must be accepted.
        outputs = summarizer(
            "(CNN)The Palestinian Authority officially became ",
            num_beams=2,
            min_length=2,
            max_length=5,
        )
        self.assertEqual(outputs, [{"summary_text": ANY(str)}])
        # Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences.
        model_can_handle_longer_seq = [
            "SwitchTransformersConfig",
            "T5Config",
            "LongT5Config",
            "LEDConfig",
            "PegasusXConfig",
            "FSMTConfig",
            "M2M100Config",
            "ProphetNetConfig",  # positional embeddings up to a fixed maximum size (otherwise clamping the values)
        ]
        if model.config.__class__.__name__ not in model_can_handle_longer_seq:
            # Too long and exception is expected.
            # For TF models, if the weights are initialized in GPU context, we won't get expected index error from
            # the embedding layer.
            if not (
                isinstance(model, TFPreTrainedModel)
                and len(summarizer.model.trainable_weights) > 0
                and "GPU" in summarizer.model.trainable_weights[0].device
            ):
                if str(summarizer.device) == "cpu":
                    with self.assertRaises(Exception):
                        outputs = summarizer("This " * 1000)
        # With explicit truncation the overlong input must be accepted by every model.
        outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST)

    @require_torch
    def test_small_model_pt(self):
        """Pin the (degenerate) output of a tiny random mbart checkpoint on PyTorch."""
        summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt", max_new_tokens=19)
        outputs = summarizer("This is a small test")
        self.assertEqual(
            outputs,
            [
                {
                    "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป"
                }
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_summarization(self):
        """End-to-end check: the default summarization pipeline on a real CNN article
        must reproduce the expected reference summary exactly."""
        summarizer = pipeline(task="summarization", device=torch_device)
        cnn_article = (
            " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on"
            " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The"
            " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based."
            " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its"
            ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East'
            ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the'
            " situation in Palestinian territories, paving the way for possible war crimes investigations against"
            " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and"
            " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the"
            " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a"
            ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the'
            ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an'
            ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge'
            " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the"
            ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine'
            " acquires all the rights as well as responsibilities that come with being a State Party to the Statute."
            ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights'
            ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should'
            " immediately end their pressure, and countries that support universal acceptance of the court's treaty"
            ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the'
            " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's"
            ' decision to join a treaty to which over 100 countries around the world are members." In January, when'
            " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an"
            ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"'
            " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a"
            ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in'
            ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We'
            ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"'
            " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the"
            ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the'
            " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou"
            ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war'
            " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry"
            " will include alleged war crimes committed since June. The International Criminal Court was set up in"
            " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder"
            " and Faith Karimi contributed to this report."
        )
        expected_cnn_summary = (
            " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives"
            " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States"
            " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move,"
            " says governments seeking to penalize Palestine should end pressure ."
        )
        result = summarizer(cnn_article)
        self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
| transformers/tests/pipelines/test_pipelines_summarization.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_summarization.py",
"repo_id": "transformers",
"token_count": 3371
} | 579 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, FbgemmFp8Config, OPTForCausalLM
from transformers.testing_utils import (
backend_empty_cache,
require_accelerate,
require_fbgemm_gpu,
require_read_token,
require_torch_gpu,
require_torch_multi_gpu,
slow,
torch_device,
)
from transformers.utils import is_accelerate_available, is_torch_available
if is_torch_available():
import torch
if is_accelerate_available():
from accelerate import init_empty_weights
@require_torch_gpu
class FbgemmFp8ConfigTest(unittest.TestCase):
    """Unit tests for (de)serialization of `FbgemmFp8Config`."""

    def test_to_dict(self):
        """
        Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object
        """
        quantization_config = FbgemmFp8Config()
        config_to_dict = quantization_config.to_dict()
        # Every serialized key must round-trip to the matching config attribute.
        for key in config_to_dict:
            self.assertEqual(getattr(quantization_config, key), config_to_dict[key])

    def test_from_dict(self):
        """
        Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict
        """
        # Renamed from `dict` to avoid shadowing the builtin.
        config_dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "fbgemm_fp8"}
        quantization_config = FbgemmFp8Config.from_dict(config_dict)
        self.assertEqual(config_dict["modules_to_not_convert"], quantization_config.modules_to_not_convert)
        self.assertEqual(config_dict["quant_method"], quantization_config.quant_method)
@slow
@require_torch_gpu
@require_fbgemm_gpu
@require_accelerate
@require_read_token
class FbgemmFp8Test(unittest.TestCase):
    """Integration tests for FBGEMM FP8 quantization of a Llama-3-8B checkpoint.

    A single quantized model is built once in `setUpClass` and reused by every test;
    individual tests cover conversion, generation, save/load round-trips, multi-GPU
    dispatch, and cpu/disk offload behavior.
    """

    # Checkpoint under test and the generation prompt/expectation shared by all tests.
    model_name = "meta-llama/Meta-Llama-3-8B"
    input_text = "What are we having for dinner?"
    max_new_tokens = 9
    EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad"
    device_map = "cuda"
    # Device map that deliberately places layers on cpu/disk to exercise offload paths.
    offload_device_map = {
        "model.embed_tokens": 0,
        "model.layers.0": 0,
        "model.layers.1": 0,
        "model.layers.2": 0,
        "model.layers.3": 0,
        "model.layers.4": 0,
        "model.layers.5": 0,
        "model.layers.6": 0,
        "model.layers.7": 0,
        "model.layers.8": 0,
        "model.layers.9": 0,
        "model.layers.10": 0,
        "model.layers.11": 0,
        "model.layers.12": 0,
        "model.layers.13": 0,
        "model.layers.14": 0,
        "model.layers.15": 0,
        "model.layers.16": "cpu",
        "model.layers.17": "cpu",
        "model.layers.18": "cpu",
        "model.layers.19": "cpu",
        "model.layers.20": "disk",
        "model.layers.21": "disk",
        "model.layers.22": "disk",
        "model.layers.23": "disk",
        "model.layers.24": "disk",
        "model.layers.25": "disk",
        "model.layers.26": "disk",
        "model.layers.27": "disk",
        "model.layers.28": "disk",
        "model.layers.29": "disk",
        "model.layers.30": "disk",
        "model.layers.31": "disk",
        "model.norm": "disk",
        "lm_head": "disk",
    }

    # called only once for all test in this class
    @classmethod
    def setUpClass(cls):
        """
        Setup quantized model
        """
        quantization_config = FbgemmFp8Config()
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.model_name, device_map=cls.device_map, quantization_config=quantization_config
        )

    def tearDown(self):
        # Free accelerator memory between tests to avoid OOM on long runs.
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()

    def test_quantized_model_conversion(self):
        """
        Simple test that checks if the quantized model has been converted properly
        """
        from transformers.integrations import FbgemmFp8Linear, replace_with_fbgemm_fp8_linear

        model_id = "facebook/opt-350m"
        config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
        quantization_config = FbgemmFp8Config()

        with init_empty_weights():
            model = OPTForCausalLM(config)

        # Count plain nn.Linear layers before conversion.
        nb_linears = 0
        for module in model.modules():
            if isinstance(module, torch.nn.Linear):
                nb_linears += 1

        model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
        nb_fbgemm_linear = 0
        for module in model.modules():
            if isinstance(module, FbgemmFp8Linear):
                nb_fbgemm_linear += 1

        # All linears are converted except one — presumably the lm_head; verify against the conversion logic.
        self.assertEqual(nb_linears - 1, nb_fbgemm_linear)

        with init_empty_weights():
            model = OPTForCausalLM(config)
        quantization_config = FbgemmFp8Config(modules_to_not_convert=["fc1"])
        model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config)
        nb_fbgemm_linear = 0
        for module in model.modules():
            if isinstance(module, FbgemmFp8Linear):
                nb_fbgemm_linear += 1

        # 25 layers stay unconverted — presumably the 24 decoder `fc1` layers plus the lm_head; TODO confirm.
        self.assertEqual(nb_linears - 25, nb_fbgemm_linear)

    def test_quantized_model(self):
        """
        Simple test that checks if the quantized model is working properly
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

        output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_save_pretrained(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)

            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_change_loading_attributes(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)

            # Override the activation scale upper bound at load time and check it is applied.
            quantization_config = FbgemmFp8Config(activation_scale_ub=1000.0)

            model = AutoModelForCausalLM.from_pretrained(
                tmpdirname, device_map=self.device_map, quantization_config=quantization_config
            )
            self.assertEqual(model.model.layers[1].mlp.down_proj.input_scale_ub.item(), 1000.0)

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_gpu
    def test_quantized_model_multi_gpu(self):
        """
        Simple test that checks if the quantized model is working properly with multiple GPUs
        set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
        """
        input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
        quantization_config = FbgemmFp8Config()
        quantized_model = AutoModelForCausalLM.from_pretrained(
            self.model_name, device_map="auto", quantization_config=quantization_config
        )
        # "auto" dispatch must have spread the model over both GPUs.
        self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})

        output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
        self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    def test_quantized_model_offload(self):
        """
        Simple test that checks if the quantized model returns an error when loading with cpu/disk offloaded
        """
        quantization_config = FbgemmFp8Config()

        # Quantizing on the fly with cpu/disk offload is unsupported and must raise.
        with self.assertRaisesRegex(
            ValueError, "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device."
        ):
            AutoModelForCausalLM.from_pretrained(
                self.model_name, device_map=self.offload_device_map, quantization_config=quantization_config
            )

    def test_save_pretrained_offload(self):
        """
        Simple test that checks if the saved quantized model is working properly cpu/disk offload
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            # Loading an already-quantized checkpoint with offload is allowed.
            quantized_model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.offload_device_map)
            output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

    @require_torch_multi_gpu
    def test_save_pretrained_multi_gpu(self):
        """
        Simple test that checks if the quantized model is working properly after being saved and loaded
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            self.quantized_model.save_pretrained(tmpdirname)

            model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto")
            self.assertTrue(set(model.hf_device_map.values()) == {0, 1})

            input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)

            output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
            self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_gpu
@require_accelerate
@require_fbgemm_gpu
class FbgemmFp8LinearTest(unittest.TestCase):
    """Output-shape checks for the `FbgemmFp8Linear` layer built on meta tensors."""

    def test_linear_preserves_shape(self):
        """
        Test that FbgemmFp8Linear preserves shape when in_features == out_features.
        """
        from transformers.integrations import FbgemmFp8Linear

        with init_empty_weights(include_buffers=True):
            fp8_layer = FbgemmFp8Linear(1024, 1024, True)
            inputs = torch.rand((17, 23, 1024))
            outputs = fp8_layer(inputs)
            # Square layer: the trailing dimension is unchanged.
            self.assertEqual(outputs.shape, inputs.shape)

    def test_linear_with_diff_feature_size_preserves_shape(self):
        """
        Test that FbgemmFp8Linear generates the correct shape when in_features != out_features.
        """
        from transformers.integrations import FbgemmFp8Linear

        with init_empty_weights(include_buffers=True):
            fp8_layer = FbgemmFp8Linear(1024, 2048, True)
            inputs = torch.rand((17, 23, 1024))
            outputs = fp8_layer(inputs)
            # Rectangular layer: only the trailing dimension becomes out_features.
            self.assertEqual(outputs.shape, (17, 23, 2048))
| transformers/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py/0 | {
"file_path": "transformers/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py",
"repo_id": "transformers",
"token_count": 5006
} | 580 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import json
import os
import random
import re
import unittest
from dataclasses import fields, is_dataclass
from pathlib import Path
from textwrap import dedent
from typing import get_args
from huggingface_hub import (
AudioClassificationInput,
AutomaticSpeechRecognitionInput,
DepthEstimationInput,
ImageClassificationInput,
ImageSegmentationInput,
ImageToTextInput,
ObjectDetectionInput,
QuestionAnsweringInput,
VideoClassificationInput,
ZeroShotImageClassificationInput,
)
from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES
from transformers.pipelines import (
AudioClassificationPipeline,
AutomaticSpeechRecognitionPipeline,
DepthEstimationPipeline,
ImageClassificationPipeline,
ImageSegmentationPipeline,
ImageToTextPipeline,
ObjectDetectionPipeline,
QuestionAnsweringPipeline,
VideoClassificationPipeline,
ZeroShotImageClassificationPipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_av,
require_pytesseract,
require_timm,
require_torch,
require_torch_or_tf,
require_vision,
)
from transformers.utils import direct_transformers_import, logging
from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests
from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests
from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests
from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests
from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests
from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests
from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests
from .pipelines.test_pipelines_image_feature_extraction import ImageFeatureExtractionPipelineTests
from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests
from .pipelines.test_pipelines_image_text_to_text import ImageTextToTextPipelineTests
from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests
from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests
from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests
from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests
from .pipelines.test_pipelines_question_answering import QAPipelineTests
from .pipelines.test_pipelines_summarization import SummarizationPipelineTests
from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests
from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests
from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests
from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests
from .pipelines.test_pipelines_text_to_audio import TextToAudioPipelineTests
from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests
from .pipelines.test_pipelines_translation import TranslationPipelineTests
from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests
from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests
from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests
from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests
pipeline_test_mapping = {
"audio-classification": {"test": AudioClassificationPipelineTests},
"automatic-speech-recognition": {"test": AutomaticSpeechRecognitionPipelineTests},
"depth-estimation": {"test": DepthEstimationPipelineTests},
"document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests},
"feature-extraction": {"test": FeatureExtractionPipelineTests},
"fill-mask": {"test": FillMaskPipelineTests},
"image-classification": {"test": ImageClassificationPipelineTests},
"image-feature-extraction": {"test": ImageFeatureExtractionPipelineTests},
"image-segmentation": {"test": ImageSegmentationPipelineTests},
"image-text-to-text": {"test": ImageTextToTextPipelineTests},
"image-to-image": {"test": ImageToImagePipelineTests},
"image-to-text": {"test": ImageToTextPipelineTests},
"mask-generation": {"test": MaskGenerationPipelineTests},
"object-detection": {"test": ObjectDetectionPipelineTests},
"question-answering": {"test": QAPipelineTests},
"summarization": {"test": SummarizationPipelineTests},
"table-question-answering": {"test": TQAPipelineTests},
"text2text-generation": {"test": Text2TextGenerationPipelineTests},
"text-classification": {"test": TextClassificationPipelineTests},
"text-generation": {"test": TextGenerationPipelineTests},
"text-to-audio": {"test": TextToAudioPipelineTests},
"token-classification": {"test": TokenClassificationPipelineTests},
"translation": {"test": TranslationPipelineTests},
"video-classification": {"test": VideoClassificationPipelineTests},
"visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests},
"zero-shot": {"test": ZeroShotClassificationPipelineTests},
"zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests},
"zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests},
"zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests},
}
task_to_pipeline_and_spec_mapping = {
# Adding a task to this list will cause its pipeline input signature to be checked against the corresponding
# task spec in the HF Hub
"audio-classification": (AudioClassificationPipeline, AudioClassificationInput),
"automatic-speech-recognition": (AutomaticSpeechRecognitionPipeline, AutomaticSpeechRecognitionInput),
"depth-estimation": (DepthEstimationPipeline, DepthEstimationInput),
"image-classification": (ImageClassificationPipeline, ImageClassificationInput),
"image-segmentation": (ImageSegmentationPipeline, ImageSegmentationInput),
"image-to-text": (ImageToTextPipeline, ImageToTextInput),
"object-detection": (ObjectDetectionPipeline, ObjectDetectionInput),
"question-answering": (QuestionAnsweringPipeline, QuestionAnsweringInput),
"video-classification": (VideoClassificationPipeline, VideoClassificationInput),
"zero-shot-image-classification": (ZeroShotImageClassificationPipeline, ZeroShotImageClassificationInput),
}
for task_info in pipeline_test_mapping.values():
test = task_info["test"]
task_info["mapping"] = {
"pt": getattr(test, "model_mapping", None),
"tf": getattr(test, "tf_model_mapping", None),
}
# The default value `hf-internal-testing` is for running the pipeline testing against the tiny models on the Hub.
# For debugging purpose, we can specify a local path which is the `output_path` argument of a previous run of
# `utils/create_dummy_models.py`.
TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing")
if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing":
TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json")
else:
TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json")
with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp:
tiny_model_summary = json.load(fp)
PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers")
# Dynamically import the Transformers module to grab the attribute classes of the processor form their names.
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS)
logger = logging.get_logger(__name__)
class PipelineTesterMixin:
model_tester = None
pipeline_model_mapping = None
supported_frameworks = ["pt", "tf"]
def run_task_tests(self, task, dtype="float32"):
"""Run pipeline tests for a specific `task`
Args:
task (`str`):
A task name. This should be a key in the mapping `pipeline_test_mapping`.
dtype (`str`, `optional`, defaults to `'float32'`):
The torch dtype to use for the model. Can be used for FP16/other precision inference.
"""
if task not in self.pipeline_model_mapping:
self.skipTest(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: `{task}` is not in "
f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`."
)
model_architectures = self.pipeline_model_mapping[task]
if not isinstance(model_architectures, tuple):
model_architectures = (model_architectures,)
# We are going to run tests for multiple model architectures, some of them might be skipped
# with this flag we are control if at least one model were tested or all were skipped
at_least_one_model_is_tested = False
for model_architecture in model_architectures:
model_arch_name = model_architecture.__name__
model_type = model_architecture.config_class.model_type
# Get the canonical name
for _prefix in ["Flax", "TF"]:
if model_arch_name.startswith(_prefix):
model_arch_name = model_arch_name[len(_prefix) :]
break
if model_arch_name not in tiny_model_summary:
continue
tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"]
# Sort image processors and feature extractors from tiny-models json file
image_processor_names = []
feature_extractor_names = []
processor_classes = tiny_model_summary[model_arch_name]["processor_classes"]
for cls_name in processor_classes:
if "ImageProcessor" in cls_name:
image_processor_names.append(cls_name)
elif "FeatureExtractor" in cls_name:
feature_extractor_names.append(cls_name)
# Processor classes are not in tiny models JSON file, so extract them from the mapping
# processors are mapped to instance, e.g. "XxxProcessor"
processor_names = PROCESSOR_MAPPING_NAMES.get(model_type, None)
if not isinstance(processor_names, (list, tuple)):
processor_names = [processor_names]
commit = None
if model_arch_name in tiny_model_summary and "sha" in tiny_model_summary[model_arch_name]:
commit = tiny_model_summary[model_arch_name]["sha"]
repo_name = f"tiny-random-{model_arch_name}"
if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing":
repo_name = model_arch_name
self.run_model_pipeline_tests(
task,
repo_name,
model_architecture,
tokenizer_names=tokenizer_names,
image_processor_names=image_processor_names,
feature_extractor_names=feature_extractor_names,
processor_names=processor_names,
commit=commit,
dtype=dtype,
)
at_least_one_model_is_tested = True
if task in task_to_pipeline_and_spec_mapping:
pipeline, hub_spec = task_to_pipeline_and_spec_mapping[task]
compare_pipeline_args_to_hub_spec(pipeline, hub_spec)
if not at_least_one_model_is_tested:
self.skipTest(
f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: Could not find any "
f"model architecture in the tiny models JSON file for `{task}`."
)
    def run_model_pipeline_tests(
        self,
        task,
        repo_name,
        model_architecture,
        tokenizer_names,
        image_processor_names,
        feature_extractor_names,
        processor_names,
        commit,
        dtype="float32",
    ):
        """Run pipeline tests for a specific `task` with the given model class and tokenizer/processor class names.

        Every combination of tokenizer x image processor x feature extractor x processor name is tried; known-bad
        combinations are filtered out via `is_pipeline_test_to_skip` before any weights are downloaded.

        Args:
            task (`str`):
                A task name. This should be a key in the mapping `pipeline_test_mapping`.
            repo_name (`str`):
                A model repository id on the Hub.
            model_architecture (`type`):
                A subclass of `PretrainedModel`.
            tokenizer_names (`list[str]`):
                A list of names of subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`.
            image_processor_names (`list[str]`):
                A list of names of subclasses of `BaseImageProcessor`.
            feature_extractor_names (`list[str]`):
                A list of names of subclasses of `FeatureExtractionMixin`.
            processor_names (`list[str]`):
                A list of names of subclasses of `ProcessorMixin`.
            commit (`str`):
                The commit hash of the model repository on the Hub.
            dtype (`str`, `optional`, defaults to `'float32'`):
                The torch dtype to use for the model. Can be used for FP16/other precision inference.
        """
        # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and
        # `run_pipeline_test`.
        pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__

        # If no image processor or feature extractor is found, we still need to test the pipeline with None,
        # otherwise for any empty list we might skip all the tests
        tokenizer_names = tokenizer_names or [None]
        image_processor_names = image_processor_names or [None]
        feature_extractor_names = feature_extractor_names or [None]
        processor_names = processor_names or [None]

        # Cartesian product of all preprocessing component names
        test_cases = [
            {
                "tokenizer_name": tokenizer_name,
                "image_processor_name": image_processor_name,
                "feature_extractor_name": feature_extractor_name,
                "processor_name": processor_name,
            }
            for tokenizer_name in tokenizer_names
            for image_processor_name in image_processor_names
            for feature_extractor_name in feature_extractor_names
            for processor_name in processor_names
        ]

        for test_case in test_cases:
            tokenizer_name = test_case["tokenizer_name"]
            image_processor_name = test_case["image_processor_name"]
            feature_extractor_name = test_case["feature_extractor_name"]
            processor_name = test_case["processor_name"]

            # Cheap, name-based skip check — avoids calling `from_pretrained` for known failures
            do_skip_test_case = self.is_pipeline_test_to_skip(
                pipeline_test_class_name,
                model_architecture.config_class,
                model_architecture,
                tokenizer_name,
                image_processor_name,
                feature_extractor_name,
                processor_name,
            )

            if do_skip_test_case:
                logger.warning(
                    f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: test is "
                    f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer "
                    f"`{tokenizer_name}` | image processor `{image_processor_name}` | feature extractor {feature_extractor_name}."
                )
                continue

            self.run_pipeline_test(
                task,
                repo_name,
                model_architecture,
                tokenizer_name=tokenizer_name,
                image_processor_name=image_processor_name,
                feature_extractor_name=feature_extractor_name,
                processor_name=processor_name,
                commit=commit,
                dtype=dtype,
            )
    def run_pipeline_test(
        self,
        task,
        repo_name,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
        commit,
        dtype="float32",
    ):
        """Run pipeline tests for a specific `task` with the given model class and tokenizer/processor class name.

        The model will be loaded from a model repository on the Hub. The test is skipped (with a warning) whenever
        the model or any required preprocessing component cannot be loaded.

        Args:
            task (`str`):
                A task name. This should be a key in the mapping `pipeline_test_mapping`.
            repo_name (`str`):
                A model repository id on the Hub.
            model_architecture (`type`):
                A subclass of `PretrainedModel`.
            tokenizer_name (`str`):
                The name of a subclass of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`.
            image_processor_name (`str`):
                The name of a subclass of `BaseImageProcessor`.
            feature_extractor_name (`str`):
                The name of a subclass of `FeatureExtractionMixin`.
            processor_name (`str`):
                The name of a subclass of `ProcessorMixin`.
            commit (`str`):
                The commit hash of the model repository on the Hub.
            dtype (`str`, `optional`, defaults to `'float32'`):
                The torch dtype to use for the model. Can be used for FP16/other precision inference.
        """
        repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}"

        model_type = model_architecture.config_class.model_type

        # Local (non-Hub) tiny-model layout: `<path>/<model_type>/<repo_name>`
        if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing":
            repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name)

        # -------------------- Load model --------------------
        # TODO: We should check if a model file is on the Hub repo. instead.
        try:
            model = model_architecture.from_pretrained(repo_id, revision=commit)
        except Exception:
            logger.warning(
                f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: Could not find or load "
                f"the model from `{repo_id}` with `{model_architecture}`."
            )
            self.skipTest(f"Could not find or load the model from {repo_id} with {model_architecture}.")

        # -------------------- Load tokenizer --------------------
        tokenizer = None
        if tokenizer_name is not None:
            tokenizer_class = getattr(transformers_module, tokenizer_name)
            tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit)

        # -------------------- Load processors --------------------
        processors = {}
        for key, name in zip(
            ["image_processor", "feature_extractor", "processor"],
            [image_processor_name, feature_extractor_name, processor_name],
        ):
            if name is not None:
                try:
                    # Can fail if some extra dependencies are not installed
                    processor_class = getattr(transformers_module, name)
                    processor = processor_class.from_pretrained(repo_id, revision=commit)
                    processors[key] = processor
                except Exception:
                    logger.warning(
                        f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: "
                        f"Could not load the {key} from `{repo_id}` with `{name}`."
                    )
                    self.skipTest(f"Could not load the {key} from {repo_id} with {name}.")

        # ---------------------------------------------------------
        # TODO: Maybe not upload such problematic tiny models to Hub.
        if tokenizer is None and "image_processor" not in processors and "feature_extractor" not in processors:
            logger.warning(
                f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: Could not find or load "
                f"any tokenizer / image processor / feature extractor from `{repo_id}`."
            )
            self.skipTest(f"Could not find or load any tokenizer / processor from {repo_id}.")

        pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__
        # Skip check that needs the instantiated objects (e.g. tokenizer attributes), not just class names
        if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, **processors):
            logger.warning(
                f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: test is "
                f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer "
                f"`{tokenizer_name}` | image processor `{image_processor_name}` | feature extractor `{feature_extractor_name}`."
            )
            self.skipTest(
                f"Test is known to fail for: model `{model_architecture.__name__}` | tokenizer `{tokenizer_name}` "
                f"| image processor `{image_processor_name}` | feature extractor `{feature_extractor_name}`."
            )

        # validate
        validate_test_components(model, tokenizer)

        if hasattr(model, "eval"):
            model = model.eval()

        # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and
        # `run_pipeline_test`.
        task_test = pipeline_test_mapping[task]["test"]()

        pipeline, examples = task_test.get_test_pipeline(model, tokenizer, **processors, dtype=dtype)
        if pipeline is None:
            # The test can disable itself, but it should be very marginal
            # Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer don't exist)
            logger.warning(
                f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{dtype} is skipped: Could not get the "
                "pipeline for testing."
            )
            self.skipTest(reason="Could not get the pipeline for testing.")

        task_test.run_pipeline_test(pipeline, examples)

        def run_batch_test(pipeline, examples):
            # Need to copy because `Conversation` are stateful
            if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None:
                return  # No batching for this and it's OK

            # 10 examples with batch size 4 means there needs to be a unfinished batch
            # which is important for the unbatcher
            def data(n):
                for _ in range(n):
                    # Need to copy because Conversation object is mutated
                    yield copy.deepcopy(random.choice(examples))

            out = []
            for item in pipeline(data(10), batch_size=4):
                out.append(item)
            self.assertEqual(len(out), 10)

        run_batch_test(pipeline, examples)
    # ----- Per-task entry points -----
    # Each test below simply dispatches to `run_task_tests` for one pipeline task; the `_fp16`
    # variants re-run the same task with `dtype="float16"`. Decorators gate on the required
    # backends (torch/vision/timm/...) and mark each method as a pipeline test.
    @is_pipeline_test
    def test_pipeline_audio_classification(self):
        self.run_task_tests(task="audio-classification")

    @is_pipeline_test
    @require_torch
    def test_pipeline_audio_classification_fp16(self):
        self.run_task_tests(task="audio-classification", dtype="float16")

    @is_pipeline_test
    def test_pipeline_automatic_speech_recognition(self):
        self.run_task_tests(task="automatic-speech-recognition")

    @is_pipeline_test
    @require_torch
    def test_pipeline_automatic_speech_recognition_fp16(self):
        self.run_task_tests(task="automatic-speech-recognition", dtype="float16")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_depth_estimation(self):
        self.run_task_tests(task="depth-estimation")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_depth_estimation_fp16(self):
        self.run_task_tests(task="depth-estimation", dtype="float16")

    @is_pipeline_test
    @require_pytesseract
    @require_torch
    @require_vision
    def test_pipeline_document_question_answering(self):
        self.run_task_tests(task="document-question-answering")

    @is_pipeline_test
    @require_pytesseract
    @require_torch
    @require_vision
    def test_pipeline_document_question_answering_fp16(self):
        self.run_task_tests(task="document-question-answering", dtype="float16")

    @is_pipeline_test
    def test_pipeline_feature_extraction(self):
        self.run_task_tests(task="feature-extraction")

    @is_pipeline_test
    @require_torch
    def test_pipeline_feature_extraction_fp16(self):
        self.run_task_tests(task="feature-extraction", dtype="float16")

    @is_pipeline_test
    def test_pipeline_fill_mask(self):
        self.run_task_tests(task="fill-mask")

    @is_pipeline_test
    @require_torch
    def test_pipeline_fill_mask_fp16(self):
        self.run_task_tests(task="fill-mask", dtype="float16")

    @is_pipeline_test
    @require_torch_or_tf
    @require_vision
    def test_pipeline_image_classification(self):
        self.run_task_tests(task="image-classification")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_image_classification_fp16(self):
        self.run_task_tests(task="image-classification", dtype="float16")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_image_segmentation(self):
        self.run_task_tests(task="image-segmentation")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_image_segmentation_fp16(self):
        self.run_task_tests(task="image-segmentation", dtype="float16")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_image_text_to_text(self):
        self.run_task_tests(task="image-text-to-text")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_image_text_to_text_fp16(self):
        self.run_task_tests(task="image-text-to-text", dtype="float16")

    @is_pipeline_test
    @require_vision
    def test_pipeline_image_to_text(self):
        self.run_task_tests(task="image-to-text")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_image_to_text_fp16(self):
        self.run_task_tests(task="image-to-text", dtype="float16")

    @is_pipeline_test
    @require_timm
    @require_vision
    @require_torch
    def test_pipeline_image_feature_extraction(self):
        self.run_task_tests(task="image-feature-extraction")

    @is_pipeline_test
    @require_timm
    @require_vision
    @require_torch
    def test_pipeline_image_feature_extraction_fp16(self):
        self.run_task_tests(task="image-feature-extraction", dtype="float16")

    @unittest.skip(reason="`run_pipeline_test` is currently not implemented.")
    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_mask_generation(self):
        self.run_task_tests(task="mask-generation")

    @unittest.skip(reason="`run_pipeline_test` is currently not implemented.")
    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_mask_generation_fp16(self):
        self.run_task_tests(task="mask-generation", dtype="float16")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_object_detection(self):
        self.run_task_tests(task="object-detection")

    @is_pipeline_test
    @require_vision
    @require_timm
    @require_torch
    def test_pipeline_object_detection_fp16(self):
        self.run_task_tests(task="object-detection", dtype="float16")

    @is_pipeline_test
    def test_pipeline_question_answering(self):
        self.run_task_tests(task="question-answering")

    @is_pipeline_test
    @require_torch
    def test_pipeline_question_answering_fp16(self):
        self.run_task_tests(task="question-answering", dtype="float16")

    @is_pipeline_test
    def test_pipeline_summarization(self):
        self.run_task_tests(task="summarization")

    @is_pipeline_test
    @require_torch
    def test_pipeline_summarization_fp16(self):
        self.run_task_tests(task="summarization", dtype="float16")

    @is_pipeline_test
    def test_pipeline_table_question_answering(self):
        self.run_task_tests(task="table-question-answering")

    @is_pipeline_test
    @require_torch
    def test_pipeline_table_question_answering_fp16(self):
        self.run_task_tests(task="table-question-answering", dtype="float16")

    @is_pipeline_test
    def test_pipeline_text2text_generation(self):
        self.run_task_tests(task="text2text-generation")

    @is_pipeline_test
    @require_torch
    def test_pipeline_text2text_generation_fp16(self):
        self.run_task_tests(task="text2text-generation", dtype="float16")

    @is_pipeline_test
    def test_pipeline_text_classification(self):
        self.run_task_tests(task="text-classification")

    @is_pipeline_test
    @require_torch
    def test_pipeline_text_classification_fp16(self):
        self.run_task_tests(task="text-classification", dtype="float16")

    @is_pipeline_test
    @require_torch_or_tf
    def test_pipeline_text_generation(self):
        self.run_task_tests(task="text-generation")

    @is_pipeline_test
    @require_torch
    def test_pipeline_text_generation_fp16(self):
        self.run_task_tests(task="text-generation", dtype="float16")

    @is_pipeline_test
    @require_torch
    def test_pipeline_text_to_audio(self):
        self.run_task_tests(task="text-to-audio")

    @is_pipeline_test
    @require_torch
    def test_pipeline_text_to_audio_fp16(self):
        self.run_task_tests(task="text-to-audio", dtype="float16")

    @is_pipeline_test
    def test_pipeline_token_classification(self):
        self.run_task_tests(task="token-classification")

    @is_pipeline_test
    @require_torch
    def test_pipeline_token_classification_fp16(self):
        self.run_task_tests(task="token-classification", dtype="float16")

    @is_pipeline_test
    def test_pipeline_translation(self):
        self.run_task_tests(task="translation")

    @is_pipeline_test
    @require_torch
    def test_pipeline_translation_fp16(self):
        self.run_task_tests(task="translation", dtype="float16")

    @is_pipeline_test
    @require_torch_or_tf
    @require_vision
    @require_av
    def test_pipeline_video_classification(self):
        self.run_task_tests(task="video-classification")

    @is_pipeline_test
    @require_vision
    @require_torch
    @require_av
    def test_pipeline_video_classification_fp16(self):
        self.run_task_tests(task="video-classification", dtype="float16")

    @is_pipeline_test
    @require_torch
    @require_vision
    def test_pipeline_visual_question_answering(self):
        self.run_task_tests(task="visual-question-answering")

    @is_pipeline_test
    @require_torch
    @require_vision
    def test_pipeline_visual_question_answering_fp16(self):
        self.run_task_tests(task="visual-question-answering", dtype="float16")

    @is_pipeline_test
    def test_pipeline_zero_shot(self):
        self.run_task_tests(task="zero-shot")

    @is_pipeline_test
    @require_torch
    def test_pipeline_zero_shot_fp16(self):
        self.run_task_tests(task="zero-shot", dtype="float16")

    @is_pipeline_test
    @require_torch
    def test_pipeline_zero_shot_audio_classification(self):
        self.run_task_tests(task="zero-shot-audio-classification")

    @is_pipeline_test
    @require_torch
    def test_pipeline_zero_shot_audio_classification_fp16(self):
        self.run_task_tests(task="zero-shot-audio-classification", dtype="float16")

    @is_pipeline_test
    @require_vision
    def test_pipeline_zero_shot_image_classification(self):
        self.run_task_tests(task="zero-shot-image-classification")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_zero_shot_image_classification_fp16(self):
        self.run_task_tests(task="zero-shot-image-classification", dtype="float16")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_zero_shot_object_detection(self):
        self.run_task_tests(task="zero-shot-object-detection")

    @is_pipeline_test
    @require_vision
    @require_torch
    def test_pipeline_zero_shot_object_detection_fp16(self):
        self.run_task_tests(task="zero-shot-object-detection", dtype="float16")
# This contains the test cases to be skipped without model architecture being involved.
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
"""Skip some tests based on the classes or their names without the instantiated objects.
This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail.
"""
# No fix is required for this case.
if (
pipeline_test_case_name == "DocumentQuestionAnsweringPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer.
return True
return False
def is_pipeline_test_to_skip_more(
self,
pipeline_test_case_name,
config,
model,
tokenizer,
image_processor=None,
feature_extractor=None,
processor=None,
): # noqa
"""Skip some more tests based on the information from the instantiated objects."""
# No fix is required for this case.
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer is not None
and getattr(tokenizer, "pad_token", None) is None
and not tokenizer.__class__.__name__.endswith("Fast")
):
# `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token.
return True
return False
def validate_test_components(model, tokenizer):
    """Apply last-minute config fixes and sanity-check model/tokenizer compatibility.

    Raises:
        ValueError: if a tokenizer is provided but no `vocab_size` can be found in the
            model configuration (except for config classes known to have none).
    """
    # TODO: Move this to tiny model creation script
    # head-specific (within a model type) necessary changes to the config
    # 1. for `BlenderbotForCausalLM`
    if type(model).__name__ == "BlenderbotForCausalLM":
        model.config.encoder_no_repeat_ngram_size = 0

    if tokenizer is None:
        return

    # TODO: Change the tiny model creation script: don't create models with problematic tokenizers
    # Avoid `IndexError` in embedding layers
    configs_without_vocab_size = ["CanineConfig"]
    # Removing `decoder=True` in `get_text_config` can lead to conflicting values e.g. in MusicGen
    vocab_size = getattr(model.config.get_text_config(decoder=True), "vocab_size", None)
    # For CLIP-like models
    if vocab_size is None and hasattr(model.config, "text_encoder"):
        vocab_size = getattr(model.config.text_config, "vocab_size", None)
    if vocab_size is None and model.config.__class__.__name__ not in configs_without_vocab_size:
        raise ValueError(
            "Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`."
        )
def get_arg_names_from_hub_spec(hub_spec, first_level=True):
    """Collect the argument names declared by a Hub task-spec dataclass.

    Used in pipeline tests to verify that a pipeline's documented arguments match the Hub
    specification for that task. Nested dataclass fields — including ones wrapped in
    `Optional`/`Union` — are flattened exactly one level deep: the nested dataclass's field
    names are collected instead of the wrapper field's own name.
    """
    collected = []
    for spec_field in fields(hub_spec):
        # Recurse into nested fields, but max one level
        if is_dataclass(spec_field.type):
            collected += [nested.name for nested in fields(spec_field.type)]
            continue
        # Catch nested dataclasses hidden inside a Union[], usually caused by Optional[]
        nested_dataclass = next((t for t in get_args(spec_field.type) if is_dataclass(t)), None)
        if nested_dataclass is not None:
            collected += [nested.name for nested in fields(nested_dataclass)]
        else:
            # Not a nested field: keep the field's own name
            collected.append(spec_field.name)
    return collected
def parse_args_from_docstring_by_indentation(docstring):
    """Extract top-level argument names from the `Args:` block of a google-format docstring.

    Used in pipeline tests to compare documented arguments against the Hub specification for
    a task. Indentation levels are the primary source of truth, so they have to be correct!

    Raises:
        ValueError: if the docstring contains no `Args:` block.
    """
    dedented = dedent(docstring)
    # (indent_width, stripped_text) for every non-blank line
    indexed_lines = []
    for raw_line in dedented.split("\n"):
        if raw_line.strip():
            indexed_lines.append((len(raw_line) - len(raw_line.lstrip()), raw_line.strip()))

    start = None
    header_indent = None
    end = None
    for position, (indent, text) in enumerate(indexed_lines):
        if text == "Args:":
            start = position
            header_indent = indent
        elif start is not None and indent == header_indent:
            # The first later line back at the header's indent closes the block
            end = position
            break
    if start is None:
        raise ValueError("No args block to parse!")
    args_block = indexed_lines[start + 1 : end] if end is not None else indexed_lines[start + 1 :]

    # Only lines at the shallowest indent inside the block name arguments;
    # deeper lines are continuation/description text.
    shallowest = min(entry[0] for entry in args_block)
    return [re.match(r"(\w+)\W", text).group(1) for indent, text in args_block if indent == shallowest]
def compare_pipeline_args_to_hub_spec(pipeline_class, hub_spec):
    """
    Compares the docstring of a pipeline class to the fields of the matching Hub input signature class to ensure that
    they match. This guarantees that Transformers pipelines can be used in inference without needing to manually
    refactor or rename inputs.
    """
    ALLOWED_TRANSFORMERS_ONLY_ARGS = ["timeout"]

    docstring = inspect.getdoc(pipeline_class.__call__).strip()
    docstring_args = set(parse_args_from_docstring_by_indentation(docstring))
    hub_args = set(get_arg_names_from_hub_spec(hub_spec))

    def _generate_like_args(args):
        # Arguments that carry generate kwargs; their exact name may differ between the two sides
        return [arg for arg in args if arg.startswith(("generate", "generation"))]

    # Special casing: We allow the name of this arg to differ
    hub_generate_args = _generate_like_args(hub_args)
    docstring_generate_args = _generate_like_args(docstring_args)
    if (
        len(hub_generate_args) == 1
        and len(docstring_generate_args) == 1
        and hub_generate_args != docstring_generate_args
    ):
        hub_args.remove(hub_generate_args[0])
        docstring_args.remove(docstring_generate_args[0])

    # Special casing 2: We permit some transformers-only arguments that don't affect pipeline output
    for allowed_arg in ALLOWED_TRANSFORMERS_ONLY_ARGS:
        if allowed_arg in docstring_args and allowed_arg not in hub_args:
            docstring_args.remove(allowed_arg)

    if hub_args != docstring_args:
        error_lines = [f"{pipeline_class.__name__} differs from JS spec {hub_spec.__name__}"]
        # Report the overlap and each side's extras to make the mismatch easy to diagnose
        for label, names in (
            ("Matching args", hub_args & docstring_args),
            ("Huggingface Hub only", hub_args - docstring_args),
            ("Transformers only", docstring_args - hub_args),
        ):
            if names:
                error_lines.append(f"{label}: {names}")
        raise ValueError("\n".join(error_lines))
| transformers/tests/test_pipeline_mixin.py/0 | {
"file_path": "transformers/tests/test_pipeline_mixin.py",
"repo_id": "transformers",
"token_count": 16843
} | 581 |
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_accelerator,
run_first,
torch_device,
)
def gather_from_all_gpus(tensor, world_size):
    """Collect `tensor` from every rank in the default process group.

    Returns a list of `world_size` tensors, where index i holds rank i's copy.
    """
    receive_buffers = []
    for _ in range(world_size):
        # all_gather needs one pre-allocated receive buffer per rank
        receive_buffers.append(torch.zeros_like(tensor))
    dist.all_gather(receive_buffers, tensor)
    return receive_buffers
class DummyDataset(Dataset):
    """Fixed-length dataset whose items draw from all three RNGs.

    Each item mixes values from `random`, `numpy.random` and `torch` so that
    per-worker seeding of every RNG source can be checked downstream.
    """

    def __init__(self):
        # Arbitrary size, large enough for several distributed batches
        self.length = 64

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        stdlib_value = random.random()
        numpy_value = np.random.random()
        torch_value = torch.rand([]).item()
        return {"x": torch.tensor([stdlib_value, numpy_value, torch_value])}
class DummyModel(nn.Module):
    """Tiny linear model whose forward also verifies per-rank data divergence.

    The forward pass gathers the input batch from every rank and asserts that at
    least one rank saw different data — i.e. dataloader workers were not seeded
    identically across ranks.
    """

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(3, 1)

    def forward(self, x):
        rank_tensor = torch.tensor(x, device=torch_device)
        all_rank_tensors = gather_from_all_gpus(rank_tensor, dist.get_world_size())
        # If every rank produced identical batches, worker seeding is broken
        assert not all(torch.allclose(t, all_rank_tensors[0]) for t in all_rank_tensors[1:])
        y = self.fc(x)
        return (y.mean(), y)
class TestTrainerDistributedWorkerSeed(TestCasePlus):
    # Launches this same file under `torchrun` so that the dataloader-worker seeding
    # assertion in `DummyModel.forward` runs with a real initialized process group.
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        """Spawn one training process per accelerator and run the seeded-worker check."""
        # One process per available accelerator device.
        device_count = backend_device_count(torch_device)
        output_dir = self.get_auto_remove_tmp_dir()
        distributed_args = f"""--nproc_per_node={device_count}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed_worker_seed.py
        """.split()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        # Each spawned rank re-enters this file via its `__main__` guard.
        execute_subprocess_async(cmd, env=self.get_env())
def run_distributed_training(training_args):
    """Train `DummyModel` for a few steps with multi-worker dataloading enabled.

    The interesting behavior is the per-rank data-divergence assertion inside
    `DummyModel.forward`, not the training result itself.
    """
    set_seed(42)
    # Keep the run short: the point is the worker-seeding assertion, not convergence
    training_args.max_steps = 10
    # dataloader_num_workers must be > 0 to enable worker_init_fn
    training_args.dataloader_num_workers = 2
    trainer = Trainer(
        DummyModel(),
        training_args,
        train_dataset=DummyDataset(),
    )
    trainer.train()
# torchrun entry point: each spawned rank parses the TrainingArguments from the
# command line and runs the distributed worker-seed training check.
if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
| transformers/tests/trainer/test_trainer_distributed_worker_seed.py/0 | {
"file_path": "transformers/tests/trainer/test_trainer_distributed_worker_seed.py",
"repo_id": "transformers",
"token_count": 1110
} | 582 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from transformers import DetrConfig, MaskFormerConfig, ResNetBackbone, ResNetConfig, TimmBackbone
from transformers.testing_utils import require_torch, slow
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
load_backbone,
verify_out_features_out_indices,
)
from transformers.utils.import_utils import is_torch_available
if is_torch_available():
import torch
from transformers import BertPreTrainedModel
class BackboneUtilsTester(unittest.TestCase):
def test_get_aligned_output_features_output_indices(self):
stage_names = ["a", "b", "c"]
# Defaults to last layer if both are None
out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
self.assertEqual(out_features, ["c"])
self.assertEqual(out_indices, [2])
# Out indices set to match out features
out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features set to match out indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [0, 2])
# Out features selected from negative indices
out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
self.assertEqual(out_features, ["a", "c"])
self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        """Exercise every validation error raised by `verify_out_features_out_indices`,
        then confirm a fully valid input passes without raising."""
        # Stage names must be set
        with pytest.raises(ValueError, match="Stage_names must be set for transformers backbones"):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with pytest.raises(ValueError, match="out_features must be a list got <class 'tuple'>"):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with pytest.raises(
            ValueError, match=r"out_features must be a subset of stage_names: \['a'\] got \['a', 'b'\]"
        ):
            verify_out_features_out_indices(["a", "b"], [0, 1], ["a"])

        # Out features must contain no duplicates
        with pytest.raises(ValueError, match=r"out_features must not contain any duplicates, got \['a', 'a'\]"):
            verify_out_features_out_indices(["a", "a"], None, ["a"])

        # Out indices must be a list
        with pytest.raises(ValueError, match="out_indices must be a list, got <class 'int'>"):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        with pytest.raises(ValueError, match="out_indices must be a list, got <class 'tuple'>"):
            verify_out_features_out_indices(None, (0, 1), ["a", "b"])

        # Out indices must be a subset of stage names
        with pytest.raises(
            ValueError, match=r"out_indices must be valid indices for stage_names \['a'\], got \[0, 1\]"
        ):
            verify_out_features_out_indices(None, [0, 1], ["a"])

        # Out indices must contain no duplicates
        with pytest.raises(ValueError, match=r"out_indices must not contain any duplicates, got \[0, 0\]"):
            verify_out_features_out_indices(None, [0, 0], ["a"])

        # Out features and out indices must be the same length
        with pytest.raises(
            ValueError, match="out_features and out_indices should have the same length if both are set"
        ):
            verify_out_features_out_indices(["a", "b"], [0], ["a", "b", "c"])

        # Out features should match out indices
        with pytest.raises(
            ValueError, match="out_features and out_indices should correspond to the same stages if both are set"
        ):
            verify_out_features_out_indices(["a", "b"], [0, 2], ["a", "b", "c"])

        # Out features and out indices should be in order
        with pytest.raises(
            ValueError,
            match=r"out_features must be in the same order as stage_names, expected \['a', 'b'\] got \['b', 'a'\]",
        ):
            verify_out_features_out_indices(["b", "a"], [0, 1], ["a", "b"])

        with pytest.raises(
            ValueError, match=r"out_indices must be in the same order as stage_names, expected \[-2, 1\] got \[1, -2\]"
        ):
            verify_out_features_out_indices(["a", "b"], [1, -2], ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], [0, 1, -1], ["a", "b", "c", "d"])
def test_backbone_mixin(self):
backbone = BackboneMixin()
backbone.stage_names = ["a", "b", "c"]
backbone._out_features = ["a", "c"]
backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [0, 2])
# Check out features and indices are updated correctly
backbone.out_features = ["a", "b"]
self.assertEqual(backbone.out_features, ["a", "b"])
self.assertEqual(backbone.out_indices, [0, 1])
backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features, ["a", "c"])
self.assertEqual(backbone.out_indices, [-3, -1])
@slow
@require_torch
def test_load_backbone_from_config(self):
"""
Test that load_backbone correctly loads a backbone from a backbone config.
"""
config = MaskFormerConfig(backbone_config=ResNetConfig(out_indices=(0, 2)))
backbone = load_backbone(config)
self.assertEqual(backbone.out_features, ["stem", "stage2"])
self.assertEqual(backbone.out_indices, (0, 2))
self.assertIsInstance(backbone, ResNetBackbone)
@slow
@require_torch
def test_load_backbone_from_checkpoint(self):
"""
Test that load_backbone correctly loads a backbone from a checkpoint.
"""
config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_config=None)
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, [4])
self.assertEqual(backbone.out_features, ["stage4"])
self.assertIsInstance(backbone, ResNetBackbone)
config = MaskFormerConfig(
backbone="resnet18",
use_timm_backbone=True,
)
backbone = load_backbone(config)
# We can't know ahead of time the exact output features and indices, or the layer names before
# creating the timm model, so it defaults to the last layer (-1,) and has a different layer name
self.assertEqual(backbone.out_indices, (-1,))
self.assertEqual(backbone.out_features, ["layer4"])
self.assertIsInstance(backbone, TimmBackbone)
@slow
@require_torch
def test_load_backbone_backbone_kwargs(self):
    """
    Test that load_backbone correctly configures the loaded backbone with the provided kwargs.
    """
config = MaskFormerConfig(backbone="resnet18", use_timm_backbone=True, backbone_kwargs={"out_indices": (0, 1)})
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, (0, 1))
self.assertIsInstance(backbone, TimmBackbone)
config = MaskFormerConfig(backbone="microsoft/resnet-18", backbone_kwargs={"out_indices": (0, 2)})
backbone = load_backbone(config)
self.assertEqual(backbone.out_indices, (0, 2))
self.assertIsInstance(backbone, ResNetBackbone)
# Check backbone_kwargs can't be passed together with a backbone config
with pytest.raises(ValueError):
config = MaskFormerConfig(
backbone="microsoft/resnet-18",
backbone_config=ResNetConfig(out_indices=(0, 2)),
backbone_kwargs={"out_indices": (0, 1)},
)
@slow
@require_torch
def test_load_backbone_in_new_model(self):
    """
    Tests that new model can be created, with its weights instantiated and pretrained backbone weights loaded.
    """
# Inherit from PreTrainedModel to ensure that the weights are initialized
class NewModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
self.layer_0 = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.layer_1 = torch.nn.Linear(config.hidden_size, config.hidden_size)
# Compares two models parameter-by-parameter; returns the names that match and differ
def get_equal_not_equal_weights(model_0, model_1):
equal_weights = []
not_equal_weights = []
for (k0, v0), (k1, v1) in zip(model_0.named_parameters(), model_1.named_parameters()):
self.assertEqual(k0, k1)
weights_are_equal = torch.allclose(v0, v1)
if weights_are_equal:
equal_weights.append(k0)
else:
not_equal_weights.append(k0)
return equal_weights, not_equal_weights
# Randomly initialized backbone: two instances should share no (non-norm) weights
config = MaskFormerConfig(use_pretrained_backbone=False, backbone="microsoft/resnet-18")
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "normalization" not in w]
self.assertEqual(len(equal_weights), 0)
# NOTE(review): 24 presumably = 20 backbone weights + 4 new linear params — confirm against resnet-18
self.assertEqual(len(not_equal_weights), 24)
# Now we create a new model with backbone weights that are pretrained
config.use_pretrained_backbone = True
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "normalization" not in w]
self.assertEqual(len(equal_weights), 20)
# Linear layers are still initialized randomly
self.assertEqual(len(not_equal_weights), 4)
# Check loading in timm backbone
config = DetrConfig(use_pretrained_backbone=False, backbone="resnet18", use_timm_backbone=True)
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w]
self.assertEqual(len(equal_weights), 0)
self.assertEqual(len(not_equal_weights), 24)
# Now we create a new model with backbone weights that are pretrained
config.use_pretrained_backbone = True
model_0 = NewModel(config)
model_1 = NewModel(config)
equal_weights, not_equal_weights = get_equal_not_equal_weights(model_0, model_1)
# Norm layers are always initialized with the same weights
equal_weights = [w for w in equal_weights if "bn" not in w and "downsample.1" not in w]
self.assertEqual(len(equal_weights), 20)
# Linear layers are still initialized randomly
self.assertEqual(len(not_equal_weights), 4)
| transformers/tests/utils/test_backbone_utils.py/0 | {
"file_path": "transformers/tests/utils/test_backbone_utils.py",
"repo_id": "transformers",
"token_count": 5009
} | 583 |
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os
import tempfile
import unittest
from io import BytesIO
from typing import Optional
import numpy as np
import pytest
import requests
from huggingface_hub.file_download import hf_hub_url, http_get
from requests import ConnectTimeout, ReadTimeout
from tests.pipelines.test_pipelines_document_question_answering import INVOICE_URL
from transformers import is_torch_available, is_vision_available
from transformers.image_utils import (
ChannelDimension,
get_channel_dimension_axis,
make_flat_list_of_images,
make_list_of_images,
make_nested_list_of_images,
)
from transformers.testing_utils import is_flaky, require_torch, require_vision
if is_torch_available():
import torch
if is_vision_available():
import PIL.Image
from transformers import ImageFeatureExtractionMixin
from transformers.image_utils import get_image_size, infer_channel_dimension_format, load_image
def get_image_from_hub_dataset(dataset_id: str, filename: str, revision: Optional[str] = None) -> "PIL.Image.Image":
    """Download `filename` from a Hub dataset repo and open it as a PIL image."""
    image_url = hf_hub_url(dataset_id, filename, repo_type="dataset", revision=revision)
    response = requests.get(image_url)
    return PIL.Image.open(BytesIO(response.content))
def get_random_image(height, width):
    """Build an RGB PIL image of shape (height, width) filled with uniform random pixels."""
    pixels = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
    return PIL.Image.fromarray(pixels)
@require_vision
class ImageFeatureExtractionTester(unittest.TestCase):
def test_conversion_image_to_array(self):
    """Check `to_numpy_array` on a PIL image for every rescale/channel_first combination."""
    feature_extractor = ImageFeatureExtractionMixin()
    image = get_random_image(16, 32)

    # Conversion with defaults (rescale + channel first)
    array1 = feature_extractor.to_numpy_array(image)
    # Fix: the original `assertTrue(array.dtype, np.float32)` always passed — the second
    # argument of assertTrue is just the failure message. assertEqual actually checks the dtype.
    self.assertEqual(array1.dtype, np.float32)
    self.assertEqual(array1.shape, (3, 16, 32))

    # Conversion with rescale and not channel first
    array2 = feature_extractor.to_numpy_array(image, channel_first=False)
    self.assertEqual(array2.dtype, np.float32)
    self.assertEqual(array2.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array1, array2.transpose(2, 0, 1)))

    # Conversion with no rescale and channel first
    array3 = feature_extractor.to_numpy_array(image, rescale=False)
    self.assertEqual(array3.dtype, np.uint8)
    self.assertEqual(array3.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array1, array3.astype(np.float32) * (1 / 255.0)))

    # Conversion with no rescale and not channel first
    array4 = feature_extractor.to_numpy_array(image, rescale=False, channel_first=False)
    self.assertEqual(array4.dtype, np.uint8)
    self.assertEqual(array4.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array2, array4.astype(np.float32) * (1 / 255.0)))
def test_conversion_array_to_array(self):
    """Check `to_numpy_array` on a NumPy array for every rescale/channel_first combination."""
    feature_extractor = ImageFeatureExtractionMixin()
    array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)

    # By default, rescale (for an array of ints) and channel permute
    array1 = feature_extractor.to_numpy_array(array)
    # Fix: `assertTrue(dtype, np.float32)` always passed (second arg is only the failure
    # message); assertEqual verifies the dtype for real.
    self.assertEqual(array1.dtype, np.float32)
    self.assertEqual(array1.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))

    # Same with no permute
    array2 = feature_extractor.to_numpy_array(array, channel_first=False)
    self.assertEqual(array2.dtype, np.float32)
    self.assertEqual(array2.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))

    # Force rescale to False
    array3 = feature_extractor.to_numpy_array(array, rescale=False)
    self.assertEqual(array3.dtype, np.uint8)
    self.assertEqual(array3.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))

    # Force rescale to False and no channel permute
    array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
    self.assertEqual(array4.dtype, np.uint8)
    self.assertEqual(array4.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array4, array))

    # Now test the default rescale for a float array (defaults to False)
    array5 = feature_extractor.to_numpy_array(array2)
    self.assertEqual(array5.dtype, np.float32)
    self.assertEqual(array5.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array5, array1))
def test_make_list_of_images_pil(self):
    """make_list_of_images wraps a single PIL image in a list and passes lists through unchanged."""
# Test a single image is converted to a list of 1 image
pil_image = get_random_image(16, 32)
images_list = make_list_of_images(pil_image)
self.assertIsInstance(images_list, list)
self.assertEqual(len(images_list), 1)
self.assertIsInstance(images_list[0], PIL.Image.Image)
# Test a list of images is not modified
images = [get_random_image(16, 32) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertIsInstance(images_list, list)
self.assertEqual(len(images_list), 4)
self.assertIsInstance(images_list[0], PIL.Image.Image)
def test_make_list_of_images_numpy(self):
    """make_list_of_images handles single, batched (4D) and listed NumPy arrays, plus 2D masks."""
# Test a single image is converted to a list of 1 image
images = np.random.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = np.random.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is not modified
images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test batched masks with no channel dimension are converted to a list of masks
masks = np.random.randint(0, 2, (4, 16, 32))
masks_list = make_list_of_images(masks, expected_ndims=2)
self.assertEqual(len(masks_list), 4)
self.assertTrue(np.array_equal(masks_list[0], masks[0]))
self.assertIsInstance(masks_list, list)
@require_torch
def test_make_list_of_images_torch(self):
    """make_list_of_images handles single, batched (4D) and listed torch tensors."""
# Test a single image is converted to a list of 1 image
images = torch.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = torch.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is left unchanged
images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
def test_make_flat_list_of_images_pil(self):
    """make_flat_list_of_images flattens single, listed and nested PIL images into one flat list."""
# Test a single image is converted to a list of 1 image
pil_image = get_random_image(16, 32)
images_list = make_flat_list_of_images(pil_image)
self.assertIsInstance(images_list, list)
self.assertEqual(len(images_list), 1)
self.assertIsInstance(images_list[0], PIL.Image.Image)
# Test a list of images is not modified
images = [get_random_image(16, 32) for _ in range(4)]
images_list = make_flat_list_of_images(images)
self.assertIsInstance(images_list, list)
self.assertEqual(len(images_list), 4)
self.assertIsInstance(images_list[0], PIL.Image.Image)
# Test a nested list of images is flattened
images = [[get_random_image(16, 32) for _ in range(2)] for _ in range(2)]
images_list = make_flat_list_of_images(images)
self.assertIsInstance(images_list, list)
self.assertEqual(len(images_list), 4)
self.assertIsInstance(images_list[0], PIL.Image.Image)
def test_make_flat_list_of_images_numpy(self):
    """make_flat_list_of_images flattens single, 4D, listed and nested NumPy arrays into one flat list."""
# Test a single image is converted to a list of 1 image
images = np.random.randint(0, 256, (16, 32, 3))
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a 4d array of images is changed to a list of images
images = np.random.randint(0, 256, (4, 16, 32, 3))
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertIsInstance(images_list, list)
self.assertIsInstance(images_list[0], np.ndarray)
self.assertTrue(np.array_equal(images_list[0], images[0]))
# Test a list of images is not modified
images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test list of 4d array images is flattened
images = [np.random.randint(0, 256, (4, 16, 32, 3)) for _ in range(2)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 8)
self.assertTrue(np.array_equal(images_list[0], images[0][0]))
self.assertIsInstance(images_list, list)
self.assertIsInstance(images_list[0], np.ndarray)
# Test nested list of images is flattened
images = [[np.random.randint(0, 256, (16, 32, 3)) for _ in range(2)] for _ in range(2)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0][0]))
self.assertIsInstance(images_list, list)
@require_torch
def test_make_flat_list_of_images_torch(self):
    """make_flat_list_of_images flattens single, 4D, listed and nested torch tensors into one flat list."""
# Test a single image is converted to a list of 1 image
images = torch.randint(0, 256, (16, 32, 3))
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a 4d tensors of images is changed to a list of images
images = torch.randint(0, 256, (4, 16, 32, 3))
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertIsInstance(images_list, list)
self.assertIsInstance(images_list[0], torch.Tensor)
self.assertTrue(np.array_equal(images_list[0], images[0]))
# Test a list of images is not modified
images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test list of 4d tensors of images is flattened
images = [torch.randint(0, 256, (4, 16, 32, 3)) for _ in range(2)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 8)
self.assertTrue(np.array_equal(images_list[0], images[0][0]))
self.assertIsInstance(images_list, list)
self.assertIsInstance(images_list[0], torch.Tensor)
# Test nested list of images is flattened
images = [[torch.randint(0, 256, (16, 32, 3)) for _ in range(2)] for _ in range(2)]
images_list = make_flat_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0][0]))
self.assertIsInstance(images_list, list)
def test_make_nested_list_of_images_pil(self):
    """make_nested_list_of_images always returns a list of lists of PIL images."""
# Test a single image is converted to a nested list of 1 image
pil_image = get_random_image(16, 32)
images_list = make_nested_list_of_images(pil_image)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list[0]), 1)
self.assertIsInstance(images_list[0][0], PIL.Image.Image)
# Test a list of images is converted to a nested list of images
images = [get_random_image(16, 32) for _ in range(4)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 1)
self.assertEqual(len(images_list[0]), 4)
self.assertIsInstance(images_list[0][0], PIL.Image.Image)
# Test a nested list of images is not modified
images = [[get_random_image(16, 32) for _ in range(2)] for _ in range(2)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 2)
self.assertEqual(len(images_list[0]), 2)
self.assertIsInstance(images_list[0][0], PIL.Image.Image)
def test_make_nested_list_of_images_numpy(self):
    """make_nested_list_of_images normalizes single, 4D, listed and nested NumPy arrays to a list of lists."""
# Test a single image is converted to a nested list of 1 image
images = np.random.randint(0, 256, (16, 32, 3))
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0][0], images))
# Test a 4d array of images is converted to a nested list of images
images = np.random.randint(0, 256, (4, 16, 32, 3))
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertIsInstance(images_list[0][0], np.ndarray)
self.assertEqual(len(images_list), 1)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0]))
# Test a list of images is converted to a nested list of images
images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 1)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0]))
# Test a nested list of images is left unchanged
images = [[np.random.randint(0, 256, (16, 32, 3)) for _ in range(2)] for _ in range(2)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 2)
self.assertEqual(len(images_list[0]), 2)
self.assertTrue(np.array_equal(images_list[0][0], images[0][0]))
# Test a list of 4d array images is converted to a nested list of images
images = [np.random.randint(0, 256, (4, 16, 32, 3)) for _ in range(2)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertIsInstance(images_list[0][0], np.ndarray)
self.assertEqual(len(images_list), 2)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0][0]))
@require_torch
def test_make_nested_list_of_images_torch(self):
    """make_nested_list_of_images normalizes single, 4D, listed and nested torch tensors to a list of lists."""
# Test a single image is converted to a nested list of 1 image
images = torch.randint(0, 256, (16, 32, 3))
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list[0]), 1)
self.assertTrue(np.array_equal(images_list[0][0], images))
# Test a 4d tensor of images is converted to a nested list of images
images = torch.randint(0, 256, (4, 16, 32, 3))
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertIsInstance(images_list[0][0], torch.Tensor)
self.assertEqual(len(images_list), 1)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0]))
# Test a list of images is converted to a nested list of images
images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 1)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0]))
# Test a nested list of images is left unchanged
images = [[torch.randint(0, 256, (16, 32, 3)) for _ in range(2)] for _ in range(2)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertEqual(len(images_list), 2)
self.assertEqual(len(images_list[0]), 2)
self.assertTrue(np.array_equal(images_list[0][0], images[0][0]))
# Test a list of 4d tensor images is converted to a nested list of images
images = [torch.randint(0, 256, (4, 16, 32, 3)) for _ in range(2)]
images_list = make_nested_list_of_images(images)
self.assertIsInstance(images_list[0], list)
self.assertIsInstance(images_list[0][0], torch.Tensor)
self.assertEqual(len(images_list), 2)
self.assertEqual(len(images_list[0]), 4)
self.assertTrue(np.array_equal(images_list[0][0], images[0][0]))
@require_torch
def test_conversion_torch_to_array(self):
    """Check `to_numpy_array` on the NumPy view of a torch tensor for all option combinations."""
    feature_extractor = ImageFeatureExtractionMixin()
    tensor = torch.randint(0, 256, (16, 32, 3))
    array = tensor.numpy()

    # By default, rescale (for a tensor of ints) and channel permute
    array1 = feature_extractor.to_numpy_array(array)
    # Fix: `assertTrue(dtype, np.float32)` always passed (second arg is only the failure
    # message); assertEqual verifies the dtype for real.
    self.assertEqual(array1.dtype, np.float32)
    self.assertEqual(array1.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))

    # Same with no permute
    array2 = feature_extractor.to_numpy_array(array, channel_first=False)
    self.assertEqual(array2.dtype, np.float32)
    self.assertEqual(array2.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))

    # Force rescale to False
    array3 = feature_extractor.to_numpy_array(array, rescale=False)
    self.assertEqual(array3.dtype, np.uint8)
    self.assertEqual(array3.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))

    # Force rescale to False and no channel permute
    array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
    self.assertEqual(array4.dtype, np.uint8)
    self.assertEqual(array4.shape, (16, 32, 3))
    self.assertTrue(np.array_equal(array4, array))

    # Now test the default rescale for a float tensor (defaults to False)
    array5 = feature_extractor.to_numpy_array(array2)
    self.assertEqual(array5.dtype, np.float32)
    self.assertEqual(array5.shape, (3, 16, 32))
    self.assertTrue(np.array_equal(array5, array1))
def test_conversion_image_to_image(self):
    """`to_pil_image` applied to something already PIL should leave the pixels untouched."""
    extractor = ImageFeatureExtractionMixin()
    original = get_random_image(16, 32)

    # Converting an image that is already a PIL image is a no-op.
    converted = extractor.to_pil_image(original)
    self.assertTrue(isinstance(original, PIL.Image.Image))
    self.assertTrue(np.array_equal(np.array(original), np.array(converted)))
def test_conversion_array_to_image(self):
    """`to_pil_image` handles int/float arrays, channel-first layouts and explicit rescale."""
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)
# By default, no rescale (for an array of ints)
image1 = feature_extractor.to_pil_image(array)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the array is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(array.transpose(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the array has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(array.astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(array.astype(np.float32), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
@require_torch
def test_conversion_tensor_to_image(self):
    """`to_pil_image` handles int/float torch tensors, channel-first layouts and explicit rescale."""
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# By default, no rescale (for a tensor of ints)
image1 = feature_extractor.to_pil_image(tensor)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the tensor is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the tensor has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(tensor.float() / 255.0)
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(tensor.float(), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
def test_resize_image_and_array(self):
    """`resize` accepts both PIL images and NumPy arrays and yields identical results."""
    feature_extractor = ImageFeatureExtractionMixin()
    image = get_random_image(16, 32)
    array = np.array(image)

    # Size can be an int or a tuple of ints.
    resized_image = feature_extractor.resize(image, 8)
    self.assertTrue(isinstance(resized_image, PIL.Image.Image))
    self.assertEqual(resized_image.size, (8, 8))

    resized_image1 = feature_extractor.resize(image, (8, 16))
    self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
    self.assertEqual(resized_image1.size, (8, 16))

    # Passing an array converts it to a PIL Image.
    resized_image2 = feature_extractor.resize(array, 8)
    self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
    self.assertEqual(resized_image2.size, (8, 8))
    self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))

    # Fix: resize the *array*, not the PIL image, so the comparison against resized_image1
    # actually exercises the array code path (the original compared two resizes of the same
    # PIL image, which is trivially equal).
    resized_image3 = feature_extractor.resize(array, (8, 16))
    self.assertTrue(isinstance(resized_image3, PIL.Image.Image))
    self.assertEqual(resized_image3.size, (8, 16))
    self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_resize_image_and_array_non_default_to_square(self):
    """With default_to_square=False, resize matches the shorter edge to `size` and honors max_size."""
feature_extractor = ImageFeatureExtractionMixin()
heights_widths = [
# height, width
# square image
(28, 28),
(27, 27),
# rectangular image: h < w
(28, 34),
(29, 35),
# rectangular image: h > w
(34, 28),
(35, 29),
]
# single integer or single integer in tuple/list
sizes = [22, 27, 28, 36, [22], (27,)]
for (height, width), size in zip(heights_widths, sizes):
for max_size in (None, 37, 1000):
image = get_random_image(height, width)
array = np.array(image)
# Normalize one-element tuples/lists to a plain int
size = size[0] if isinstance(size, (list, tuple)) else size
# Size can be an int or a tuple of ints.
# If size is an int, smaller edge of the image will be matched to this number.
# i.e, if height > width, then image will be rescaled to (size * height / width, size).
if height < width:
exp_w, exp_h = (int(size * width / height), size)
if max_size is not None and max_size < exp_w:
exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
elif width < height:
exp_w, exp_h = (size, int(size * height / width))
if max_size is not None and max_size < exp_h:
exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
else:
exp_w, exp_h = (size, size)
if max_size is not None and max_size < size:
exp_w, exp_h = max_size, max_size
resized_image = feature_extractor.resize(image, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (exp_w, exp_h))
# Passing an array converts it to a PIL Image.
resized_image2 = feature_extractor.resize(array, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
self.assertEqual(resized_image2.size, (exp_w, exp_h))
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
@require_torch
def test_resize_tensor(self):
    """`resize` on a torch tensor matches the result on the equivalent NumPy array."""
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# Size can be an int or a tuple of ints.
resized_image = feature_extractor.resize(tensor, 8)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (8, 8))
resized_image1 = feature_extractor.resize(tensor, (8, 16))
self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
self.assertEqual(resized_image1.size, (8, 16))
# Check we get the same results as with NumPy arrays.
resized_image2 = feature_extractor.resize(array, 8)
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
resized_image3 = feature_extractor.resize(array, (8, 16))
self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_normalize_image(self):
    """`normalize` on a PIL image converts to a float32 channel-first array and applies (x - mean) / std."""
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = np.array(image)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# PIL Image are converted to NumPy arrays for the normalization
normalized_image = feature_extractor.normalize(image, mean, std)
self.assertTrue(isinstance(normalized_image, np.ndarray))
self.assertEqual(normalized_image.shape, (3, 16, 32))
# During the conversion rescale and channel first will be applied.
expected = array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)
np_mean = np.array(mean).astype(np.float32)[:, None, None]
np_std = np.array(std).astype(np.float32)[:, None, None]
expected = (expected - np_mean) / np_std
self.assertTrue(np.array_equal(normalized_image, expected))
def test_normalize_array(self):
    """`normalize` accepts list or ndarray mean/std and auto-detects channel-first vs channel-last."""
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.random((16, 32, 3))
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or NumPy arrays.
expected = (array - np.array(mean)) / np.array(std)
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
# Normalize will detect automatically if channel first or channel last is used.
array = np.random.random((3, 16, 32))
expected = (array - np.array(mean)[:, None, None]) / np.array(std)[:, None, None]
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
@require_torch
def test_normalize_tensor(self):
    """`normalize` accepts list or tensor mean/std and auto-detects channel-first vs channel-last."""
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.rand(16, 32, 3)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or tensors.
expected = (tensor - torch.tensor(mean)) / torch.tensor(std)
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
# Normalize will detect automatically if channel first or channel last is used.
tensor = torch.rand(3, 16, 32)
expected = (tensor - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
def test_center_crop_image(self):
    """center_crop on a PIL image returns a PIL image of exactly the requested size."""
    extractor = ImageFeatureExtractionMixin()
    image = get_random_image(16, 32)

    # Crop sizes cover: smaller than both dims, bigger on one dim only, bigger on both.
    for size in (8, (8, 64), 20, (32, 64)):
        cropped = extractor.center_crop(image, size)
        self.assertTrue(isinstance(cropped, PIL.Image.Image))
        # PIL's Image.size is (width, height), i.e. transposed w.r.t. NumPy/PyTorch.
        if isinstance(size, int):
            expected = (size, size)
        else:
            expected = (size[1], size[0])
        self.assertEqual(cropped.size, expected)
def test_center_crop_array(self):
    """`center_crop` on a NumPy array has the right shape and matches the PIL result."""
    mixin = ImageFeatureExtractionMixin()
    image = get_random_image(16, 32)
    array = mixin.to_numpy_array(image)

    # Crop sizes covering: smaller on all axes, larger on one axis, larger on both axes.
    for requested in (8, (8, 64), 20, (32, 64)):
        cropped = mixin.center_crop(array, requested)
        self.assertTrue(isinstance(cropped, np.ndarray))
        expected_hw = (requested, requested) if isinstance(requested, int) else requested
        self.assertEqual(cropped.shape[-2:], expected_hw)
        # The array path must agree with cropping the PIL image directly.
        pil_cropped = mixin.center_crop(image, requested)
        self.assertTrue(np.array_equal(cropped, mixin.to_numpy_array(pil_cropped)))
@require_torch
def test_center_crop_tensor(self):
    """`center_crop` on a torch tensor has the right shape and matches the PIL result."""
    mixin = ImageFeatureExtractionMixin()
    image = get_random_image(16, 32)
    tensor = torch.tensor(mixin.to_numpy_array(image))

    # Crop sizes covering: smaller on all axes, larger on one axis, larger on both axes.
    for requested in (8, (8, 64), 20, (32, 64)):
        cropped = mixin.center_crop(tensor, requested)
        self.assertTrue(isinstance(cropped, torch.Tensor))
        expected_hw = (requested, requested) if isinstance(requested, int) else requested
        self.assertEqual(cropped.shape[-2:], expected_hw)
        # The tensor path must agree with cropping the PIL image directly.
        pil_cropped = mixin.center_crop(image, requested)
        self.assertTrue(torch.equal(cropped, torch.tensor(mixin.to_numpy_array(pil_cropped))))
@require_vision
class LoadImageTester(unittest.TestCase):
    """Tests for `load_image`: loading from URLs, local paths, and base64 payloads,
    plus conversion of non-RGB modes (RGBA/LA/L) and EXIF orientation handling.

    Several tests download fixtures from the Hub, so they require network access.
    """

    def test_load_img_url(self):
        """A remote URL loads into an image with the expected (H, W, 3) shape."""
        img = load_image(INVOICE_URL)
        img_arr = np.array(img)

        self.assertEqual(img_arr.shape, (1061, 750, 3))

    @is_flaky()
    def test_load_img_url_timeout(self):
        """An unreasonably small timeout raises a requests timeout error."""
        with self.assertRaises((ReadTimeout, ConnectTimeout)):
            load_image(INVOICE_URL, timeout=0.001)

    def test_load_img_local(self):
        """A local file path loads into an image with the expected shape."""
        img = load_image("./tests/fixtures/tests_samples/COCO/000000039769.png")
        img_arr = np.array(img)

        self.assertEqual(
            img_arr.shape,
            (480, 640, 3),
        )

    def test_load_img_base64_prefix(self):
        """A base64 string carrying a data-URI style prefix is decoded correctly."""
        try:
            # delete=False so the file can be re-opened by name; cleaned up in `finally`.
            tmp_file = tempfile.NamedTemporaryFile(delete=False).name
            with open(tmp_file, "wb") as f:
                http_get(
                    "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_0.txt", f
                )

            with open(tmp_file, encoding="utf-8") as b64:
                img = load_image(b64.read())
                img_arr = np.array(img)

        finally:
            os.remove(tmp_file)

        self.assertEqual(img_arr.shape, (64, 32, 3))

    def test_load_img_base64(self):
        """A plain base64 string (no prefix) is decoded correctly."""
        try:
            tmp_file = tempfile.NamedTemporaryFile(delete=False).name
            with open(tmp_file, "wb") as f:
                http_get(
                    "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_1.txt", f
                )

            with open(tmp_file, encoding="utf-8") as b64:
                img = load_image(b64.read())
                img_arr = np.array(img)

        finally:
            os.remove(tmp_file)

        self.assertEqual(img_arr.shape, (64, 32, 3))

    def test_load_img_base64_encoded_bytes(self):
        """A base64 payload stored as escaped bytes is decoded correctly."""
        try:
            tmp_file = tempfile.NamedTemporaryFile(delete=False).name
            with open(tmp_file, "wb") as f:
                http_get(
                    "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_2.txt", f
                )

            # unicode_escape decoding reverses the escaped-bytes representation of the fixture.
            with codecs.open(tmp_file, encoding="unicode_escape") as b64:
                img = load_image(b64.read())
                img_arr = np.array(img)

        finally:
            os.remove(tmp_file)

        self.assertEqual(img_arr.shape, (256, 256, 3))

    def test_load_img_rgba(self):
        """An RGBA image is converted to 3-channel RGB by `load_image`."""
        # we use revision="refs/pr/1" until the PR is merged
        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
        img = get_image_from_hub_dataset(
            "hf-internal-testing/fixtures_image_utils", "0-test-lena.png", revision="refs/pr/1"
        )

        img = load_image(img)  # img with mode RGBA
        img_arr = np.array(img)

        self.assertEqual(
            img_arr.shape,
            (512, 512, 3),
        )

    def test_load_img_la(self):
        """An LA (luminance + alpha) image is converted to 3-channel RGB."""
        # we use revision="refs/pr/1" until the PR is merged
        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
        img = get_image_from_hub_dataset(
            "hf-internal-testing/fixtures_image_utils", "1-test-parrots.png", revision="refs/pr/1"
        )

        img = load_image(img)  # img with mode LA
        img_arr = np.array(img)

        self.assertEqual(
            img_arr.shape,
            (512, 768, 3),
        )

    def test_load_img_l(self):
        """A single-channel L (grayscale) image is converted to 3-channel RGB."""
        # we use revision="refs/pr/1" until the PR is merged
        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
        img = get_image_from_hub_dataset(
            "hf-internal-testing/fixtures_image_utils", "2-test-tree.png", revision="refs/pr/1"
        )

        img = load_image(img)  # img with mode L
        img_arr = np.array(img)

        self.assertEqual(
            img_arr.shape,
            (381, 225, 3),
        )

    def test_load_img_exif_transpose(self):
        """`load_image` applies the EXIF orientation tag, swapping H and W for a rotated image."""
        # we use revision="refs/pr/1" until the PR is merged
        # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
        img_without_exif_transpose = get_image_from_hub_dataset(
            "hf-internal-testing/fixtures_image_utils", "3-test-cat-rotated.jpg", revision="refs/pr/1"
        )
        img_arr_without_exif_transpose = np.array(img_without_exif_transpose)

        self.assertEqual(
            img_arr_without_exif_transpose.shape,
            (333, 500, 3),
        )

        img_with_exif_transpose = load_image(img_without_exif_transpose)
        img_arr_with_exif_transpose = np.array(img_with_exif_transpose)

        # After the transpose is applied, the dimensions are swapped.
        self.assertEqual(
            img_arr_with_exif_transpose.shape,
            (500, 333, 3),
        )
class UtilFunctionTester(unittest.TestCase):
    """Tests for the image-utils helper functions that inspect array layout:
    `get_image_size`, `infer_channel_dimension_format`, `get_channel_dimension_axis`."""

    def test_get_image_size(self):
        """`get_image_size` returns (height, width) regardless of channel position."""
        # Test we can infer the size and channel dimension of an image.
        image = np.random.randint(0, 256, (32, 64, 3))
        self.assertEqual(get_image_size(image), (32, 64))

        image = np.random.randint(0, 256, (3, 32, 64))
        self.assertEqual(get_image_size(image), (32, 64))

        # Test the channel dimension can be overridden
        # (forcing LAST on a channel-first array reads the wrong axes on purpose).
        image = np.random.randint(0, 256, (3, 32, 64))
        self.assertEqual(get_image_size(image, channel_dim=ChannelDimension.LAST), (3, 32))

    def test_infer_channel_dimension(self):
        """`infer_channel_dimension_format` detects FIRST/LAST layout or raises on ambiguity."""
        # Test we fail with invalid input: too few or too many dimensions.
        with pytest.raises(ValueError):
            infer_channel_dimension_format(np.random.randint(0, 256, (10, 10)))

        with pytest.raises(ValueError):
            infer_channel_dimension_format(np.random.randint(0, 256, (10, 10, 10, 10, 10)))

        # Test we fail if neither first not last dimension is of size 3 or 1
        with pytest.raises(ValueError):
            infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)))

        # But if we explicitly set one of the number of channels to 50 it works
        inferred_dim = infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)), num_channels=50)
        self.assertEqual(inferred_dim, ChannelDimension.LAST)

        # Test we correctly identify the channel dimension (3 or 1 channels, either end).
        image = np.random.randint(0, 256, (3, 4, 5))
        inferred_dim = infer_channel_dimension_format(image)
        self.assertEqual(inferred_dim, ChannelDimension.FIRST)

        image = np.random.randint(0, 256, (1, 4, 5))
        inferred_dim = infer_channel_dimension_format(image)
        self.assertEqual(inferred_dim, ChannelDimension.FIRST)

        image = np.random.randint(0, 256, (4, 5, 3))
        inferred_dim = infer_channel_dimension_format(image)
        self.assertEqual(inferred_dim, ChannelDimension.LAST)

        image = np.random.randint(0, 256, (4, 5, 1))
        inferred_dim = infer_channel_dimension_format(image)
        self.assertEqual(inferred_dim, ChannelDimension.LAST)

        # We can take a batched array of images and find the dimension
        image = np.random.randint(0, 256, (1, 3, 4, 5))
        inferred_dim = infer_channel_dimension_format(image)
        self.assertEqual(inferred_dim, ChannelDimension.FIRST)

    def test_get_channel_dimension_axis(self):
        """`get_channel_dimension_axis` returns the integer axis index of the channel dim."""
        # Test we correctly identify the channel dimension
        image = np.random.randint(0, 256, (3, 4, 5))
        inferred_axis = get_channel_dimension_axis(image)
        self.assertEqual(inferred_axis, 0)

        image = np.random.randint(0, 256, (1, 4, 5))
        inferred_axis = get_channel_dimension_axis(image)
        self.assertEqual(inferred_axis, 0)

        image = np.random.randint(0, 256, (4, 5, 3))
        inferred_axis = get_channel_dimension_axis(image)
        self.assertEqual(inferred_axis, 2)

        image = np.random.randint(0, 256, (4, 5, 1))
        inferred_axis = get_channel_dimension_axis(image)
        self.assertEqual(inferred_axis, 2)

        # We can take a batched array of images and find the dimension
        # (the channel axis shifts to 1 because of the leading batch axis).
        image = np.random.randint(0, 256, (1, 3, 4, 5))
        inferred_axis = get_channel_dimension_axis(image)
        self.assertEqual(inferred_axis, 1)
| transformers/tests/utils/test_image_utils.py/0 | {
"file_path": "transformers/tests/utils/test_image_utils.py",
"repo_id": "transformers",
"token_count": 18510
} | 584 |
import argparse
import os
import re
import subprocess
from typing import Optional
from huggingface_hub import paper_info
ROOT = os.getcwd().split("utils")[0]
DOCS_PATH = os.path.join(ROOT, "docs/source/en/model_doc")
MODELS_PATH = os.path.join(ROOT, "src/transformers/models")
COPYRIGHT_DISCLAIMER = """<!--Copyright 2025 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->"""
ARXIV_PAPERS_NOT_IN_HF_PAPERS = {
"gemma3n.md": "2506.06644",
"xmod.md": "2205.06266",
}
def get_modified_cards() -> list[str]:
    """Get the list of model names from modified files in docs/source/en/model_doc/"""
    porcelain = subprocess.check_output(["git", "status", "--porcelain"], text=True)
    names = []
    for entry in porcelain.strip().split("\n"):
        if not entry:
            continue
        # The last whitespace-separated token of a porcelain entry is the path.
        path = entry.split()[-1]
        if path.startswith("docs/source/en/model_doc/") and path.endswith(".md"):
            name = os.path.splitext(os.path.basename(path))[0]
            # These two docs are not model cards proper.
            if name not in ("auto", "timm_wrapper"):
                names.append(name)
    return names
def get_paper_link(model_card: Optional[str], path: Optional[str]) -> str:
    """Return the first paper link found in a model card.

    Returns the sentinel "blog" when the card references a blog post/technical report,
    and "No_paper" when no Hugging Face or arxiv paper link is present (other external
    links, if any, are printed for manual inspection).
    """
    if model_card is not None and not model_card.endswith(".md"):
        model_card = f"{model_card}.md"
    card_path = path or os.path.join(DOCS_PATH, f"{model_card}")
    model_card = os.path.basename(card_path)
    with open(card_path, "r", encoding="utf-8") as handle:
        text = handle.read()

    # Cards backed by a blog post or technical report have no paper to date.
    if any(marker in text for marker in ("blog", "report", "post")):
        print(f"Insert the release date of the blog post or technical report at the top of {model_card}")
        return "blog"

    # Prefer Hugging Face paper links, then arxiv links.
    candidates = re.findall(r"https://huggingface\.co/papers/\d+\.\d+", text)
    candidates += re.findall(r"https://arxiv\.org/abs/\d+\.\d+", text)
    if candidates:
        return candidates[0]

    # No known paper link: surface any other external https links for manual review.
    external = []
    for raw_link in re.findall(r"https://[^\s\)]+", text):
        cleaned = raw_link.rstrip(".,;!?)")
        if "huggingface.co" not in cleaned and "github.com" not in cleaned:
            external.append(cleaned)
    # Remove duplicates while preserving order.
    external = list(dict.fromkeys(external))
    if external:
        print(f"No Hugging Face or Arxiv papers found. The possible paper links found in {model_card}:")
        for link in external:
            print(f" - {link}")
    return "No_paper"
def get_first_commit_date(model_name: Optional[str]) -> str:
    """Return the date (YYYY-MM-DD) the model was added to HF transformers.

    This is the first commit date of the model's `__init__.py`, falling back to the
    doc page's history for legacy models without an init file.
    """
    if model_name.endswith(".md"):
        model_name = model_name[:-3]
    # Doc names use dashes while source directories use underscores.
    src_name = model_name.replace("-", "_") if "-" in model_name else model_name
    tracked_path = os.path.join(MODELS_PATH, src_name, "__init__.py")
    # If the init file is not found (only true for legacy models), use the doc page.
    if not os.path.exists(tracked_path):
        tracked_path = os.path.join(DOCS_PATH, f"{model_name}.md")
    history = subprocess.check_output(
        ["git", "log", "--reverse", "--pretty=format:%ad", "--date=iso", tracked_path], text=True
    )
    # The reversed log lists the oldest commit first; keep only the YYYY-MM-DD prefix.
    return history.strip().split("\n")[0][:10]
def get_release_date(link: str) -> str:
    """Resolve a paper link to its publication date in ISO format.

    Falls back to the literal "{release_date}" placeholder when the date cannot be
    fetched (arxiv-only papers, unknown links, or API errors).
    """
    hf_prefix = "https://huggingface.co/papers/"
    if link.startswith(hf_prefix):
        paper_id = link.removeprefix(hf_prefix)
        try:
            return paper_info(paper_id).published_at.date().isoformat()
        except Exception as e:
            print(f"Error fetching release date for the paper https://huggingface.co/papers/{paper_id}: {e}")
    elif link.startswith("https://arxiv.org/abs/"):
        print(f"This paper {link} is not yet available in Hugging Face papers, skipping the release date attachment.")
    return r"{release_date}"
def replace_paper_links(file_path: str) -> bool:
    """Replace arxiv links with huggingface links if valid, and replace hf.co with huggingface.co.

    Returns True when the file on disk was modified.
    """
    with open(file_path, "r", encoding="utf-8") as handle:
        text = handle.read()
    card_name = os.path.basename(file_path)
    before = text

    # Canonicalize the short hf.co domain.
    text = text.replace("https://hf.co/", "https://huggingface.co/")

    # Upgrade arxiv links to Hugging Face paper pages when the paper is mirrored there.
    for paper_id in re.findall(r"https://arxiv\.org/abs/(\d+\.\d+)", text):
        try:
            # Raises when the paper is not mirrored on huggingface.
            paper_info(paper_id)
        except Exception:
            print(f"Paper {paper_id} for {card_name} is not available on huggingface, keeping the arxiv link")
            continue
        arxiv_link = f"https://arxiv.org/abs/{paper_id}"
        hf_link = f"https://huggingface.co/papers/{paper_id}"
        text = text.replace(arxiv_link, hf_link)
        print(f"Replaced {arxiv_link} with {hf_link}")

    # Write back only if something actually changed.
    if text == before:
        return False
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(text)
    return True
def insert_dates(model_card_list: list[str]):
    """Insert release and commit dates into model cards.

    For each card, paper links are first canonicalized, then a line of the form
    `*This model was released on <release date> and added to Hugging Face Transformers
    on <commit date>.*` is inserted right below the copyright disclaimer (which is
    added if missing). If the line already exists, only the commit date is refreshed;
    the existing release date is never overwritten.
    """
    for model_card in model_card_list:
        if not model_card.endswith(".md"):
            model_card = f"{model_card}.md"
        # These two docs are not model cards proper and never get dates.
        if model_card == "auto.md" or model_card == "timm_wrapper.md":
            continue
        file_path = os.path.join(DOCS_PATH, model_card)

        # First replace arxiv paper links with hf paper link if possible
        links_replaced = replace_paper_links(file_path)
        if links_replaced:
            print(f"Updated paper links in {model_card}")

        # Matches an existing dates line; group(1) is the release date, group(2) the HF date.
        pattern = (
            r"\n\*This model was released on (.*) and added to Hugging Face Transformers on (\d{4}-\d{2}-\d{2})\.\*"
        )

        # Check if the copyright disclaimer sections exists, if not, add one with 2025
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()

        markers = list(re.finditer(r"-->", content))  # Dates info is placed right below this marker
        if len(markers) == 0:
            print(f"No marker found in {model_card}. Adding copyright disclaimer to the top.")
            # Add copyright disclaimer to the very top of the file
            content = COPYRIGHT_DISCLAIMER + "\n\n" + content
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
            # Re-scan so the insertion index below refers to the updated content.
            markers = list(re.finditer(r"-->", content))

        hf_commit_date = get_first_commit_date(model_name=model_card)
        match = re.search(pattern, content)

        # If the dates info line already exists, only check and update the hf_commit_date, don't modify the existing release date
        if match:
            release_date = match.group(1)  # The release date part
            existing_hf_date = match.group(2)  # The existing HF date part
            if existing_hf_date != hf_commit_date:
                old_line = match.group(0)  # Full matched line
                new_line = f"\n*This model was released on {release_date} and added to Hugging Face Transformers on {hf_commit_date}.*"
                content = content.replace(old_line, new_line)
                with open(file_path, "w", encoding="utf-8") as f:
                    f.write(content)
        # If the dates info line does not exist, add it
        else:
            paper_link = get_paper_link(model_card=model_card, path=file_path)
            release_date = ""
            if not (paper_link == "No_paper" or paper_link == "blog"):
                release_date = get_release_date(paper_link)
            else:
                # No paper found: leave a template placeholder for a human to fill in.
                release_date = r"{release_date}"
            insert_index = markers[0].end()
            date_info = f"\n*This model was released on {release_date} and added to Hugging Face Transformers on {hf_commit_date}.*"
            content = content[:insert_index] + date_info + content[insert_index:]
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
            print(f"Added {model_card} release and commit dates.")
def get_all_model_cards():
    """Get all model cards from the docs path"""
    # Collect every .md file, dropping the two docs that are not model cards proper.
    cards = [
        os.path.splitext(entry)[0]
        for entry in os.listdir(DOCS_PATH)
        if entry.endswith(".md") and os.path.splitext(entry)[0] not in ("auto", "timm_wrapper")
    ]
    return sorted(cards)
def main(all=False, auto=True, models=None):
    """Select the set of model cards to process and insert dates into them.

    NOTE: the `all` parameter shadows the builtin of the same name; it is kept to
    preserve the public signature (it mirrors the `--all` CLI flag).
    """
    if all:
        cards = get_all_model_cards()
        print(f"Processing all {len(cards)} model cards from docs directory")
    elif auto:
        cards = get_modified_cards()
        if not cards:
            print("No modified model cards found.")
            return
        print(f"Processing modified model cards: {cards}")
    else:
        cards = models
        print(f"Processing specified model cards: {cards}")
    insert_dates(cards)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Add release and commit dates to model cards")
    # The three modes are mutually exclusive; with no flag given, --auto is the default.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--auto", action="store_true", help="Automatically process modified model cards from git status"
    )
    group.add_argument("--models", nargs="+", help="Specify model cards to process (without .md extension)")
    group.add_argument("--all", action="store_true", help="Process all model cards in the docs directory")
    parser.set_defaults(auto=True)
    args = parser.parse_args()
    # `main` checks `all` first, then `auto`, then the explicit `models` list.
    main(args.all, args.auto, args.models)
| transformers/utils/add_dates.py/0 | {
"file_path": "transformers/utils/add_dates.py",
"repo_id": "transformers",
"token_count": 4475
} | 585 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Check the status of self-hosted GitHub Actions runners and fail if any is offline.

    Queries the GitHub API for the repository's runners, writes the offline target
    runners to `offline_runners.txt` (so they can be reported on Slack), and raises
    a `ValueError` listing them when at least one is offline.

    Args:
        target_runners: Iterable of runner names to check.
        token: GitHub token with `actions:read` permission.

    Raises:
        ValueError: If any of the target runners is reported offline.
    """
    offline_runners = []
    cmd = [
        "curl",
        "-H",
        "Accept: application/vnd.github+json",
        "-H",
        f"Authorization: Bearer {token}",
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
    ]
    # Fix: `shell` must be False when `cmd` is a list. With `shell=True`, a POSIX shell
    # receives only "curl" as the command (the remaining list items become shell
    # positional parameters), so the API call was never actually made and `json.loads`
    # below failed on empty output.
    output = subprocess.run(cmd, check=False, shell=False, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    runners = status["runners"]
    for runner in runners:
        # Only consider the runners we were asked about; record those reported offline.
        if runner["name"] in target_runners and runner["status"] == "offline":
            offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join(x["name"] for x in offline_runners)
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":

    def list_str(values):
        # argparse `type` hook: split a comma-separated string into a list of runner names.
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| transformers/utils/check_self_hosted_runner.py/0 | {
"file_path": "transformers/utils/check_self_hosted_runner.py",
"repo_id": "transformers",
"token_count": 650
} | 586 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7, workflow_id=None):
    """Get the workflow runs of the scheduled (daily) CI.

    This only selects the runs triggered by the `schedule` event on the `main` branch.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run). From a given workflow run id, the
    # workflow id is exposed under the `workflow_id` key of
    # https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}
    if not workflow_id:
        current_run_id = os.environ["GITHUB_RUN_ID"]
        run_payload = requests.get(
            f"https://api.github.com/repos/huggingface/transformers/actions/runs/{current_run_id}",
            headers=headers,
        ).json()
        workflow_id = run_payload["workflow_id"]

    # On `main` branch + not returning PRs + only `num_runs` results.
    base_url = (
        f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
        f"?branch=main&exclude_pull_requests=true&per_page={num_runs}"
    )
    # Prefer runs triggered by the `schedule` event; fall back to `workflow_run` triggers.
    runs = requests.get(f"{base_url}&event=schedule", headers=headers).json()["workflow_runs"]
    if len(runs) == 0:
        runs = requests.get(f"{base_url}&event=workflow_run", headers=headers).json()["workflow_runs"]
    return runs
def get_last_daily_ci_run(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # An explicitly provided run id short-circuits the search entirely.
    if workflow_run_id is not None and workflow_run_id != "":
        return requests.get(
            f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}",
            headers=headers,
        ).json()

    for run in get_daily_ci_runs(token, workflow_id=workflow_id):
        if run["status"] != "completed":
            continue
        # Without a target sha, take the most recent completed run; otherwise
        # require the run's head sha to match the requested commit.
        if commit_sha in [None, ""] or run["head_sha"] == commit_sha:
            return run
    return None
def get_last_daily_ci_workflow_run_id(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    # An explicitly provided run id wins.
    if workflow_run_id is not None and workflow_run_id != "":
        return workflow_run_id
    run = get_last_daily_ci_run(token, workflow_id=workflow_id, commit_sha=commit_sha)
    return None if run is None else run["id"]
def get_last_daily_ci_run_commit(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
    """Get the commit sha of the last completed scheduled daily CI workflow run."""
    run = get_last_daily_ci_run(
        token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha
    )
    return None if run is None else run["head_sha"]
def get_last_daily_ci_artifacts(
    output_dir,
    token,
    workflow_run_id=None,
    workflow_id=None,
    commit_sha=None,
    artifact_names=None,
):
    """Get the artifacts of last completed workflow run id of the scheduled (daily) CI.

    Downloads each requested artifact (all available ones when `artifact_names` is
    None) into `output_dir` and returns the list of artifact names actually
    downloaded. Implicitly returns None when no completed run can be resolved.
    """
    workflow_run_id = get_last_daily_ci_workflow_run_id(
        token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha
    )
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) appears to mirror the parameter name of
        # `get_artifacts_links` — confirm against that helper before "fixing" the typo.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        if artifact_names is None:
            artifact_names = artifacts_links.keys()

        downloaded_artifact_names = []
        for artifact_name in artifact_names:
            # Silently skip names that the run did not produce.
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
                downloaded_artifact_names.append(artifact_name)

        return downloaded_artifact_names
def get_last_daily_ci_reports(
    output_dir,
    token,
    workflow_run_id=None,
    workflow_id=None,
    commit_sha=None,
    artifact_names=None,
):
    """Get the artifacts' content of the last completed workflow run id of the scheduled (daily) CI.

    Downloads the artifacts into `output_dir`, extracts each zip into a directory
    named after the artifact, and returns a nested mapping
    `{artifact_name: {file_name: file_content}}`.
    """
    downloaded_artifact_names = get_last_daily_ci_artifacts(
        output_dir,
        token,
        workflow_run_id=workflow_run_id,
        workflow_id=workflow_id,
        commit_sha=commit_sha,
        artifact_names=artifact_names,
    )

    results = {}
    for artifact_name in downloaded_artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if not os.path.isfile(artifact_zip_path):
            continue
        target_dir = os.path.join(output_dir, artifact_name)
        with zipfile.ZipFile(artifact_zip_path) as z:
            z.extractall(target_dir)

        results[artifact_name] = {}
        # Fix: the original bound the directory listing to `filename` and then
        # immediately shadowed it with the loop variable of the same name; use a
        # distinct name for the listing.
        filenames = os.listdir(target_dir)
        for filename in filenames:
            file_path = os.path.join(target_dir, filename)
            # Only read regular files; skip nested directories inside the artifact.
            if not os.path.isdir(file_path):
                with open(file_path) as fp:
                    results[artifact_name][filename] = fp.read()

    return results
| transformers/utils/get_previous_daily_ci.py/0 | {
"file_path": "transformers/utils/get_previous_daily_ci.py",
"repo_id": "transformers",
"token_count": 2512
} | 587 |
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that prepares the repository for releases (or patches) by updating all versions in the relevant places. It
also performs some post-release cleanup, by updating the links in the main README to respective model doc pages (from
main to stable).
To prepare for a release, use from the root of the repo on the release branch with:
```bash
python release.py
```
or use `make pre-release`.
To prepare for a patch release, use from the root of the repo on the release branch with:
```bash
python release.py --patch
```
or use `make pre-patch`.
To do the post-release cleanup, use from the root of the repo on the main branch with:
```bash
python release.py --post_release
```
or use `make post-release`.
"""
import argparse
import os
import re
from pathlib import Path
import packaging.version
# All paths are defined with the intent that this script should be run from the root of the repo.
PATH_TO_EXAMPLES = "examples/"
PATH_TO_MODELS = "src/transformers/models"
# This maps a type of file to the pattern to look for when searching where the version is defined, as well as the
# template to follow when replacing it with the new version.
REPLACE_PATTERNS = {
"examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
"init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
"setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
"uv_script_release": (
re.compile(r'^# "transformers(\[.+\])?.*$', re.MULTILINE),
r'# "transformers\g<1>==VERSION",',
),
"uv_script_dev": (
re.compile(r'^# "transformers(\[.+\])?.*$', re.MULTILINE),
r'# "transformers\g<1> @ git+https://github.com/huggingface/transformers.git",',
),
}
# This maps a type of file to its path in Transformers
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
UV_SCRIPT_MARKER = "# /// script"
def update_version_in_file(fname: str, version: str, file_type: str):
    """
    Update the version of Transformers in one file.

    Args:
        fname (`str`): The path to the file where we want to update the version.
        version (`str`): The new version to set in the file.
        file_type (`str`): The type of the file (should be a key in `REPLACE_PATTERNS`).
    """
    # Each file type maps to a (compiled pattern, replacement template) pair.
    pattern, template = REPLACE_PATTERNS[file_type]
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        contents = f.read()
    updated = pattern.sub(template.replace("VERSION", version), contents)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(updated)
def update_version_in_examples(version: str, patch: bool = False):
    """
    Update the version in all examples files.

    Args:
        version (`str`): The new version to set in the examples.
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Skip the non-actively maintained legacy examples.
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if not fname.endswith(".py"):
                continue
            file_path = os.path.join(folder, fname)
            if UV_SCRIPT_MARKER in Path(file_path).read_text():
                # Update the dependencies declared in the UV script header.
                script_type = "uv_script_dev" if ".dev" in version else "uv_script_release"
                update_version_in_file(file_path, version, file_type=script_type)
            if not patch:
                # We don't update the version in the examples for patch releases.
                update_version_in_file(file_path, version, file_type="examples")
def global_version_update(version: str, patch: bool = False):
    """
    Update the version in all needed files.

    Args:
        version (`str`): The new version to set everywhere.
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    # Core files (main __init__ and setup.py) first, then every example script.
    for file_type, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, file_type)
    update_version_in_examples(version, patch=patch)
def remove_conversion_scripts():
    """
    Delete the scripts that convert models from older, unsupported formats. We don't want to include these
    in release wheels because they often have to open insecure file types (pickle, Torch .bin models). This results in
    vulnerability scanners flagging us and can cause compliance issues for users with strict security policies.
    """
    # Materialize the glob before unlinking so we don't delete entries out from
    # under the directory scan.
    scripts = list(Path(PATH_TO_MODELS).glob("**/convert*.py"))
    for script in scripts:
        script.unlink()
def get_version() -> packaging.version.Version:
    """
    Reads the current version in the main __init__.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        source = f.read()
    # The init pattern's single capture group holds the version string.
    (current,) = REPLACE_PATTERNS["init"][0].search(source).groups()
    return packaging.version.parse(current)
def pre_release_work(patch: bool = False):
    """
    Do all the necessary pre-release steps:
    - figure out the next minor release version and ask confirmation
    - update the version everywhere
    - clean-up the model list in the main README

    Args:
        patch (`bool`, *optional*, defaults to `False`): Whether or not this is a patch release.
    """
    # Default version: base version if we are on a dev version, bumped micro (patch)
    # or minor (regular release) otherwise.
    current = get_version()
    if patch and current.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if current.is_devrelease:
        default_version = current.base_version
    elif patch:
        default_version = f"{current.major}.{current.minor}.{current.micro + 1}"
    else:
        default_version = f"{current.major}.{current.minor + 1}.0"

    # Confirm interactively, accepting the default on an empty answer.
    version = input(f"Which version are you releasing? [{default_version}]") or default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    print("Deleting conversion scripts.")
    remove_conversion_scripts()
def post_release_work():
    """
    Do all the necessary post-release steps:
    - figure out the next dev version and ask confirmation
    - update the version everywhere
    - clean-up the model list in the main README
    """
    # Suggest the next dev version: bump minor and append the ".dev0" suffix.
    released = get_version()
    suggestion = f"{released.major}.{released.minor + 1}.0.dev0"

    # Confirm with the user; an empty answer accepts the suggestion.
    answer = input(f"Which version are we developing now? [{suggestion}]")
    version = answer if answer else suggestion

    print(f"Updating version to {version}.")
    global_version_update(version)
if __name__ == "__main__":
    # CLI entry point: choose between the pre-release and post-release workflows.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    arg_parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    cli_args = arg_parser.parse_args()

    if cli_args.post_release:
        if cli_args.patch:
            # A patch release does not move the dev version forward.
            print("Nothing to do after a patch :-)")
        else:
            post_release_work()
    else:
        pre_release_work(patch=cli_args.patch)
| transformers/utils/release.py/0 | {
"file_path": "transformers/utils/release.py",
"repo_id": "transformers",
"token_count": 2975
} | 588 |
from transformers import LlavaOnevisionVideoProcessor
class CustomVideoProcessor(LlavaOnevisionVideoProcessor):
    # Intentionally empty subclass. Given its location under a test_module path, this is
    # presumably a fixture for verifying that custom video processor classes can be
    # dynamically loaded — TODO confirm against the test suite that references it.
    pass
| transformers/utils/test_module/custom_video_processing.py/0 | {
"file_path": "transformers/utils/test_module/custom_video_processing.py",
"repo_id": "transformers",
"token_count": 34
} | 589 |
# Aligning Text-to-Image Diffusion Models with Reward Backpropagation
[](https://huggingface.co/models?other=alignprop,trl)
## The why
If your reward function is differentiable, directly backpropagating gradients from the reward models to the diffusion model is significantly more sample and compute efficient (25x) than doing policy gradient algorithm like DDPO.
AlignProp does full backpropagation through time, which allows updating the earlier steps of denoising via reward backpropagation.
<div style="text-align: center"><img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/reward_tuning.png"/></div>
## Getting started with `examples/scripts/alignprop.py`
The `alignprop.py` script is a working example of using the `AlignProp` trainer to finetune a Stable Diffusion model. This example explicitly configures a small subset of the overall parameters associated with the config object (`AlignPropConfig`).
**Note:** one A100 GPU is recommended to get this running. For lower memory setting, consider setting truncated_backprop_rand to False. With default settings this will do truncated backpropagation with K=1.
Almost every configuration parameter has a default. There is only one commandline flag argument that is required of the user to get things up and running. The user is expected to have a [huggingface user access token](https://huggingface.co/docs/hub/security-tokens) that will be used to upload the model post-finetuning to HuggingFace hub. The following bash command is to be entered to get things running
```bash
python alignprop.py --hf_user_access_token <token>
```
To obtain the documentation of `stable_diffusion_tuning.py`, please run `python stable_diffusion_tuning.py --help`
The following are things to keep in mind (The code checks this for you as well) in general while configuring the trainer (beyond the use case of using the example script)
- The configurable randomized truncation range (`--alignprop_config.truncated_rand_backprop_minmax=(0,50)`) the first number should be equal and greater than 0, while the second number should equal or less to the number of diffusion timesteps (sample_num_steps)
- The configurable truncation backprop absolute step (`--alignprop_config.truncated_backprop_timestep=49`) the number should be less than the number of diffusion timesteps (sample_num_steps), it only matters when truncated_backprop_rand is set to False
## Setting up the image logging hook function
Expect the function to be given a dictionary with keys
```python
['image', 'prompt', 'prompt_metadata', 'rewards']
```
and `image`, `prompt`, `prompt_metadata`, `rewards` are batched.
You are free to log however you want; the use of `wandb` or `tensorboard` is recommended.
### Key terms
- `rewards` : The reward/score is a numerical value associated with the generated image and is key to steering the RL process
- `prompt` : The prompt is the text that is used to generate the image
- `prompt_metadata` : The prompt metadata is the metadata associated with the prompt. A situation where this will not be empty is when the reward model comprises of a [`FLAVA`](https://huggingface.co/docs/transformers/model_doc/flava) setup where questions and ground answers (linked to the generated image) are expected with the generated image (See here: https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/rewards.py#L45)
- `image` : The image generated by the Stable Diffusion model
Example code for logging sampled images with `wandb` is given below.
```python
# for logging these images to wandb
def image_outputs_hook(image_data, global_step, accelerate_logger):
# For the sake of this example, we only care about the last batch
# hence we extract the last element of the list
result = {}
images, prompts, rewards = [image_data['images'],image_data['prompts'],image_data['rewards']]
for i, image in enumerate(images):
pil = Image.fromarray(
(image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
)
pil = pil.resize((256, 256))
result[f"{prompts[i]:.25} | {rewards[i]:.2f}"] = [pil]
accelerate_logger.log_images(
result,
step=global_step,
)
```
### Using the finetuned model
Assuming you've done with all the epochs and have pushed up your model to the hub, you can use the finetuned model as follows
```python
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.to("cuda")
pipeline.load_lora_weights('mihirpd/alignprop-trl-aesthetics')
prompts = ["squirrel", "crab", "starfish", "whale","sponge", "plankton"]
results = pipeline(prompts)
for prompt, image in zip(prompts,results.images):
image.save(f"dump/{prompt}.png")
```
## Credits
This work is heavily influenced by the repo [here](https://github.com/mihirp1998/AlignProp/) and the associated paper [Aligning Text-to-Image Diffusion Models with Reward Backpropagation
by Mihir Prabhudesai, Anirudh Goyal, Deepak Pathak, Katerina Fragkiadaki](https://huggingface.co/papers/2310.03739).
| trl/docs/source/alignprop_trainer.md/0 | {
"file_path": "trl/docs/source/alignprop_trainer.md",
"repo_id": "trl",
"token_count": 1570
} | 590 |
# Other
## profiling_decorator
[[autodoc]] extras.profiling.profiling_decorator
## profiling_context
[[autodoc]] extras.profiling.profiling_context
| trl/docs/source/others.md/0 | {
"file_path": "trl/docs/source/others.md",
"repo_id": "trl",
"token_count": 51
} | 591 |
# Using LLaMA models with TRL
We've begun rolling out examples to use Meta's LLaMA models in `trl` (see [Meta's LLaMA release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) for the original LLaMA model).
## Efficient training strategies
Even training the smallest LLaMA model requires an enormous amount of memory. Some quick math: in bf16, every parameter uses 2 bytes (in fp32 4 bytes) in addition to 8 bytes used, e.g., in the Adam optimizer (see the [performance docs](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer) in Transformers for more info). So a 7B parameter model would use `(2+8)*7B=70GB` just to fit in memory and would likely need more when you compute intermediate values such as attention scores. So you couldn’t train the model even on a single 80GB A100 like that. You can use some tricks, like more efficient optimizers or half-precision training, to squeeze a bit more into memory, but you’ll run out sooner or later.
Another option is to use Parameter-Efficient Fine-Tuning (PEFT) techniques, such as the [`peft`](https://github.com/huggingface/peft) library, which can perform low-rank adaptation (LoRA) on a model loaded in 8-bit.
For more on `peft` + `trl`, see the [Peft integration](peft_integration) docs.
Loading the model in 8bit reduces the memory footprint drastically since you only need one byte per parameter for the weights (e.g. 7B LlaMa is 7GB in memory).
Instead of training the original weights directly, LoRA adds small adapter layers on top of some specific layers (usually the attention layers); thus, the number of trainable parameters is drastically reduced.
In this scenario, a rule of thumb is to allocate ~1.2-1.4GB per billion parameters (depending on the batch size and sequence length) to fit the entire fine-tuning setup.
This enables fine-tuning larger models (up to 50-60B scale models on a NVIDIA A100 80GB) at low cost.
Now we can fit very large models into a single GPU, but the training might still be very slow.
The simplest strategy in this scenario is data parallelism: we replicate the same training setup into separate GPUs and pass different batches to each GPU.
With this, you can parallelize the forward/backward passes of the model and scale with the number of GPUs.

We use either the `transformers.Trainer` or `accelerate`, which both support data parallelism without any code changes, by simply passing arguments when calling the scripts with `torchrun` or `accelerate launch`. The following runs a training script with 8 GPUs on a single machine with `accelerate` and `torchrun`, respectively.
```bash
accelerate launch --multi_gpu --num_machines 1 --num_processes 8 my_accelerate_script.py
torchrun --nnodes 1 --nproc_per_node 8 my_torch_script.py
```
## Supervised fine-tuning
Before we start training reward models and tuning our model with RL, it helps if the model is already good in the domain we are interested in.
In our case, we want it to answer questions, while for other use cases, we might want it to follow instructions, in which case instruction tuning is a great idea.
The easiest way to achieve this is by continuing to train the language model with the language modeling objective on texts from the domain or task.
The [StackExchange dataset](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) is enormous (over 10 million instructions), so we can easily train the language model on a subset of it.
There is nothing special about fine-tuning the model before doing RLHF - it’s just the causal language modeling objective from pretraining that we apply here.
To use the data efficiently, we use a technique called packing: instead of having one text per sample in the batch and then padding to either the longest text or the maximal context of the model, we concatenate a lot of texts with a EOS token in between and cut chunks of the context size to fill the batch without any padding.

With this approach the training is much more efficient as each token that is passed through the model is also trained in contrast to padding tokens which are usually masked from the loss.
If you don't have much data and are more concerned about occasionally cutting off some tokens that are overflowing the context you can also use a classical data loader.
```python
# load model in 8bit
model = AutoModelForCausalLM.from_pretrained(
args.model_path,
load_in_8bit=True,
device_map={"": Accelerator().local_process_index}
)
model = prepare_model_for_kbit_training(model)
# add LoRA to model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
```
We train the model for a few thousand steps with the causal language modeling objective and save the model.
Since we will tune the model again with different objectives, we merge the adapter weights with the original model weights.
**Disclaimer:** due to LLaMA's license, we release only the adapter weights for this and the model checkpoints in the following sections.
You can apply for access to the base model's weights by filling out Meta AI's [form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) and then converting them to the 🤗 Transformers format by running this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py).
Note that you'll also need to install 🤗 Transformers from source until the `v4.28` is released.
Now that we have fine-tuned the model for the task, we are ready to train a reward model.
## Reward modeling and human preferences
In principle, we could fine-tune the model using RLHF directly with the human annotations.
However, this would require us to send some samples to humans for rating after each optimization iteration.
This is expensive and slow due to the number of training samples needed for convergence and the inherent latency of human reading and annotator speed.
A trick that works well instead of direct feedback is training a reward model on human annotations collected before the RL loop.
The goal of the reward model is to imitate how a human would rate a text. There are several possible strategies to build a reward model: the most straightforward way would be to predict the annotation (e.g. a rating score or a binary value for “good”/”bad”).
In practice, what works better is to predict the ranking of two examples, where the reward model is presented with two candidates `(y_k, y_j)` for a given prompt `x` and has to predict which one would be rated higher by a human annotator.
With the StackExchange dataset, we can infer which of the two answers was preferred by the users based on the score.
With that information and the loss defined above, we can then modify the `transformers.Trainer` by adding a custom loss function.
```python
class RewardTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
if return_outputs:
return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
return loss
```
We utilize a subset of 100,000 pairs of candidates and evaluate on a held-out set of 50,000. With a modest training batch size of 4, we train the Llama model using the LoRA `peft` adapter for a single epoch using the Adam optimizer with BF16 precision. Our LoRA configuration is:
```python
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
```
As detailed in the next section, the resulting adapter can be merged into the frozen model and saved for further downstream use.
## Reinforcement Learning from Human Feedback
With the fine-tuned language model and the reward model at hand, we are now ready to run the RL loop. It follows roughly three steps:
1. Generate responses from prompts,
2. Rate the responses with the reward model,
3. Run a reinforcement learning policy-optimization step with the ratings.
The Query and Response prompts are templated as follows before being tokenized and passed to the model:
```bash
Question: <Query>
Answer: <Response>
```
The same template was used for SFT, RM and RLHF stages.
Once more, we utilize `peft` for memory-efficient training, which offers an extra advantage in the RLHF context.
Here, the reference model and policy share the same base, the SFT model, which we load in 8-bit and freeze during training.
We exclusively optimize the policy's LoRA weights using PPO while sharing the base model's weights.
```python
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
question_tensors = batch["input_ids"]
# sample from the policy and to generate responses
response_tensors = ppo_trainer.generate(
question_tensors,
return_prompt=False,
length_sampler=output_length_sampler,
**generation_kwargs,
)
batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)
# Compute sentiment score
texts = [q + r for q, r in zip(batch["query"], batch["response"])]
pipe_outputs = sentiment_pipe(texts, **sent_kwargs)
rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]
# Run PPO step
stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
# Log stats to Wandb
ppo_trainer.log_stats(stats, batch, rewards)
```
For the rest of the details and evaluation, please refer to our [blog post on StackLLaMA](https://huggingface.co/blog/stackllama).
| trl/docs/source/using_llama_models.md/0 | {
"file_path": "trl/docs/source/using_llama_models.md",
"repo_id": "trl",
"token_count": 2915
} | 592 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional
from datasets import load_dataset
from huggingface_hub import ModelCard
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
    r"""
    Arguments for the script.

    Args:
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether to push the dataset to the Hugging Face Hub.
        repo_id (`str`, *optional*, defaults to `"trl-lib/math_shepherd"`):
            Hugging Face repository ID to push the dataset to.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of workers to use for dataset processing.
    """

    # If True, the processed dataset and its model card are uploaded to `repo_id`.
    push_to_hub: bool = field(
        default=False,
        metadata={"help": "Whether to push the dataset to the Hugging Face Hub."},
    )
    # Target Hub repository for the processed dataset.
    repo_id: str = field(
        default="trl-lib/math_shepherd",
        metadata={"help": "Hugging Face repository ID to push the dataset to."},
    )
    # Forwarded to `Dataset.map(num_proc=...)`; None means single-process mapping.
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of workers to use for dataset processing."},
    )
def process_example(example):
    """
    Convert one raw Math-Shepherd row into a stepwise-supervision record.

    The raw `example["input"]` marks the end of each reasoning step with the token "ки", and
    `example["label"]` is the same text with each marker replaced by "+" (correct step) or "-"
    (incorrect step). This splits the text into a prompt plus a list of completions, with one
    boolean label per completion.

    Args:
        example (`dict`): Raw dataset row with `"input"` and `"label"` keys.

    Returns:
        `dict`: `{"prompt": str, "completions": list[str], "labels": list[bool]}`.

    Raises:
        ValueError: If markers and labels are misaligned, or the prompt cannot be split off.
    """
    # Replace "ки" with "ⶻ" so that the size of the "input" matches the size of the "label"
    inputs = example["input"].replace("ки", "ⶻ")
    # Find the indices of the "ⶻ" characters (that should match with the indexes of the "+" or "-" in the label)
    indexes = [m.start() for m in re.finditer("ⶻ", inputs)]
    # Validate that every marked position carries a "+" or "-" label. Raise instead of `assert`
    # so the check still runs under `python -O`.
    if not all(example["label"][idx] in ["+", "-"] for idx in indexes):
        raise ValueError(f"Labels and step markers are misaligned for input: {example['input']!r}")
    # Get the labels
    labels = [example["label"][idx] == "+" for idx in indexes]
    # Split the inputs into steps (caution, the first step is missing here, it is the prompt)
    steps = [inputs[i:j] for i, j in zip(chain([0], indexes), chain(indexes, [None]))]
    # Remove the last step (single ⶻ)
    steps = steps[:-1]
    # Get the prompt (first part) and completions (rest)
    prompt = steps[0]
    completions = steps[1:]
    # Remove the heading "ⶻ" and the final whitespace from the completions
    if not all(completion.startswith("ⶻ") for completion in completions):
        raise ValueError(f"Expected every completion to start with the step marker: {completions!r}")
    completions = [completion[1:].strip() for completion in completions]
    # At this point, we need to retrieve the first step from the prompt.
    # First, we handle particular cases (annotation error) where we have a first label before the end of the prompt.
    if prompt.startswith(
        (
            "Mr. Rocky",
            "Parker",
            "What is the smallest positive",
            " The Myth",
            "Let $\\mathbf{a}$",
            "Find the arithmetic",
            "Determine an ordered pair",
            "Determine the ordered pair",
            "At the Quill and Scroll stationery",
            "Round to the nearest",
            r"Calculate $\sqrt{10p}",
            r"Simplify $\sqrt{28x}",
        )
    ):
        # Some spotted datasets errors where there is an annotation in the prompt: we remove it
        labels = labels[1:]
    # Then we handle the general case: we get the first step from the prompt by looking for "Step 1:" or "step 1:" or
    # (less common) "?".
    elif "Step 1:" in prompt:
        prompt, first_step = prompt.split("Step 1:")
        first_step = "Step 1:" + first_step
        completions = [first_step.strip()] + completions
    elif "step 1:" in prompt:
        prompt, first_step = prompt.split("step 1:")
        first_step = "step 1:" + first_step
        completions = [first_step.strip()] + completions
    elif "?" in prompt:
        prompt, first_step = prompt.split("?")
        prompt = prompt + "?"
        completions = [first_step.strip()] + completions
    else:
        raise ValueError(f"Prompt can't be processed: {prompt}")
    # Strip the prompt
    prompt = prompt.strip()
    # Sanity check that there is exactly one label per completion (raises under `python -O` too).
    if len(completions) != len(labels):
        raise ValueError(
            f"Got {len(completions)} completions but {len(labels)} labels for input: {example['input']!r}"
        )
    return {"prompt": prompt, "completions": completions, "labels": labels}
model_card = ModelCard("""
---
tags: [trl]
---
# Math-Shepherd Dataset
## Summary
The Math-Shepherd dataset is a processed version of [Math-Shepherd dataset](peiyi9979/Math-Shepherd), designed to train models using the [TRL library](https://github.com/huggingface/trl) for stepwise supervision tasks. It provides step-by-step solutions to mathematical problems, enabling models to learn and verify each step of a solution, thereby enhancing their reasoning capabilities.
## Data Structure
- **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard)
- **Type**: [Stepwise supervision](https://huggingface.co/docs/trl/main/dataset_formats#stepwise-supervision)
Columns:
- `"prompt"`: The problem statement.
- `"completions"`: A list of reasoning steps generated to solve the problem.
- `"labels"`: A list of booleans or floats indicating the correctness of each corresponding reasoning step.
This structure allows models to learn the correctness of each step in a solution, facilitating improved reasoning and problem-solving abilities.
## Generation script
The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/math_shepherd.py).
""")
if __name__ == "__main__":
    parser = HfArgumentParser(ScriptArguments)
    script_args = parser.parse_args_into_dataclasses()[0]

    # Load the raw Math-Shepherd data and convert each row to the stepwise-supervision format.
    raw_dataset = load_dataset("peiyi9979/Math-Shepherd", split="train")
    processed_dataset = raw_dataset.map(
        process_example,
        remove_columns=["input", "label", "task"],
        num_proc=script_args.dataset_num_proc,
    )
    # Hold out 5% for evaluation, seeded for reproducibility.
    processed_dataset = processed_dataset.train_test_split(test_size=0.05, seed=42)

    if script_args.push_to_hub:
        processed_dataset.push_to_hub(script_args.repo_id)
        model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
| trl/examples/datasets/math_shepherd.py/0 | {
"file_path": "trl/examples/datasets/math_shepherd.py",
"repo_id": "trl",
"token_count": 2327
} | 593 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import config
import torch
from custom_trainer import LayerSkipSFTTrainer
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DataCollatorForCompletionOnlyLM, SFTConfig
def formatting_prompts_func(example):
    """Render a (utterance, semantic_parse) pair as an instruction/response training string."""
    prompt = f"### Instruction: {example['utterance']}\n ### Response: {example['semantic_parse']}"
    # Inject eos_token as a string before tokenization, because they are not always added
    # See: https://github.com/huggingface/transformers/issues/22794 and
    # https://github.com/huggingface/trl/issues/1623
    if tokenizer.eos_token:  # usually something like "</s>" for GPT2 or "<|endoftext|>"
        prompt = prompt + f"{tokenizer.eos_token}"
    return prompt
if __name__ == "__main__":
    # load the dataset
    print("[INFO] loading the dataset...")
    train_dataset = load_dataset(config.dataset_name, split="train")
    print(f"output_root_dir: {config.output_root_dir}")
    print(f"hub_model_id: {config.hub_model_id}")
    # load the model and tokenizer
    print("[INFO] loading the model and tokenizer...")
    # device_map="auto" places the model across available devices; bf16 halves memory vs fp32.
    model = AutoModelForCausalLM.from_pretrained(config.model_name, device_map="auto", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name, add_eos_token=True)
    # adding pad and eos tokens if not provided in the tokenizer
    if tokenizer.pad_token is None:
        # Add '[PAD]' token if it doesn't exist
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # The embedding matrix must grow to cover the newly added token id.
        model.resize_token_embeddings(len(tokenizer))
        model.config.pad_token_id = tokenizer.pad_token_id
    # A distinct EOS is needed when the tokenizer has none or reuses BOS as EOS.
    if tokenizer.eos_token is None or tokenizer.eos_token == tokenizer.bos_token:
        # Add '[EOS]' token if it doesn't exist
        tokenizer.add_special_tokens({"eos_token": "[EOS]"})
        model.resize_token_embeddings(len(tokenizer))
        model.config.eos_token_id = tokenizer.eos_token_id
    # Loss is computed only on tokens after this marker (completion-only training).
    response_template = " ### Response:"
    collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer)
    args = SFTConfig(
        do_train=True,
        bf16=True,
        max_seq_length=None,
        per_device_train_batch_size=config.per_device_train_batch_size,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        learning_rate=config.learning_rate,
        packing=False,  # packing must stay off: the completion-only collator needs one example per row
        num_train_epochs=1.0,
        report_to="none",
        push_to_hub=True,
        hub_model_id=config.hub_model_id,
        output_dir=config.output_dir,
        save_steps=1000,
        save_total_limit=2,
    )
    trainer = LayerSkipSFTTrainer(
        model,
        train_dataset=train_dataset,
        args=args,
        formatting_func=formatting_prompts_func,
        data_collator=collator,
    )
    trainer.train()
| trl/examples/research_projects/layer_skip/scripts/layer_skip_sft.py/0 | {
"file_path": "trl/examples/research_projects/layer_skip/scripts/layer_skip_sft.py",
"repo_id": "trl",
"token_count": 1279
} | 594 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# ]
# ///
"""
python examples/scripts/ddpo.py \
--num_epochs=200 \
--train_gradient_accumulation_steps=1 \
--sample_num_steps=50 \
--sample_batch_size=6 \
--train_batch_size=3 \
--sample_num_batches_per_epoch=4 \
--per_prompt_stat_tracking=True \
--per_prompt_stat_tracking_buffer_size=32 \
--tracker_project_name="stable_diffusion_training" \
--log_with="wandb"
"""
import os
from dataclasses import dataclass, field
import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError
from transformers import CLIPModel, CLIPProcessor, HfArgumentParser, is_torch_npu_available, is_torch_xpu_available
from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline
@dataclass
class ScriptArguments:
    r"""
    Arguments for the script.

    Args:
        pretrained_model (`str`, *optional*, defaults to `"runwayml/stable-diffusion-v1-5"`):
            Pretrained model to use.
        pretrained_revision (`str`, *optional*, defaults to `"main"`):
            Pretrained model revision to use.
        hf_hub_model_id (`str`, *optional*, defaults to `"ddpo-finetuned-stable-diffusion"`):
            HuggingFace repo to save model weights to.
        hf_hub_aesthetic_model_id (`str`, *optional*, defaults to `"trl-lib/ddpo-aesthetic-predictor"`):
            Hugging Face model ID for aesthetic scorer model weights.
        hf_hub_aesthetic_model_filename (`str`, *optional*, defaults to `"aesthetic-model.pth"`):
            Hugging Face model filename for aesthetic scorer model weights.
        use_lora (`bool`, *optional*, defaults to `True`):
            Whether to use LoRA.
    """

    # Base Stable Diffusion checkpoint to fine-tune.
    pretrained_model: str = field(
        default="runwayml/stable-diffusion-v1-5", metadata={"help": "Pretrained model to use."}
    )
    pretrained_revision: str = field(default="main", metadata={"help": "Pretrained model revision to use."})
    hf_hub_model_id: str = field(
        default="ddpo-finetuned-stable-diffusion", metadata={"help": "HuggingFace repo to save model weights to."}
    )
    # Repo/filename `AestheticScorer` pulls its MLP head weights from.
    hf_hub_aesthetic_model_id: str = field(
        default="trl-lib/ddpo-aesthetic-predictor",
        metadata={"help": "Hugging Face model ID for aesthetic scorer model weights."},
    )
    hf_hub_aesthetic_model_filename: str = field(
        default="aesthetic-model.pth",
        metadata={"help": "Hugging Face model filename for aesthetic scorer model weights."},
    )
    use_lora: bool = field(default=True, metadata={"help": "Whether to use LoRA."})
class MLP(nn.Module):
    """Feed-forward head mapping a 768-dim CLIP image embedding to a scalar aesthetic score."""

    def __init__(self):
        super().__init__()
        # Layer list is built separately for readability; module names ("layers.0", "layers.2", ...)
        # stay identical to the original so pre-trained state dicts still load.
        stack = [
            nn.Linear(768, 1024),
            nn.Dropout(0.2),
            nn.Linear(1024, 128),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            nn.Dropout(0.1),
            nn.Linear(64, 16),
            nn.Linear(16, 1),
        ]
        self.layers = nn.Sequential(*stack)

    @torch.no_grad()
    def forward(self, embed):
        # Scoring is inference-only, so gradients are never tracked here.
        return self.layers(embed)
class AestheticScorer(torch.nn.Module):
    """
    This model attempts to predict the aesthetic score of an image. The aesthetic score
    is a numerical approximation of how much a specific image is liked by humans on average.
    This is from https://github.com/christophschuhmann/improved-aesthetic-predictor
    """

    def __init__(self, *, dtype, model_id, model_filename):
        super().__init__()
        # CLIP backbone produces the image embedding that the MLP head scores.
        self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
        self.mlp = MLP()
        try:
            # Prefer fetching the MLP head weights from the Hugging Face Hub...
            cached_path = hf_hub_download(model_id, model_filename)
        except EntryNotFoundError:
            # ...and fall back to treating `model_id` as a local directory.
            cached_path = os.path.join(model_id, model_filename)
        # weights_only=True avoids unpickling arbitrary objects from the checkpoint.
        state_dict = torch.load(cached_path, map_location=torch.device("cpu"), weights_only=True)
        self.mlp.load_state_dict(state_dict)
        self.dtype = dtype
        self.eval()

    @torch.no_grad()
    def __call__(self, images):
        # Run preprocessing on CPU, then move tensors to the module's device/dtype.
        device = next(self.parameters()).device
        inputs = self.processor(images=images, return_tensors="pt")
        inputs = {k: v.to(self.dtype).to(device) for k, v in inputs.items()}
        embed = self.clip.get_image_features(**inputs)
        # normalize embedding
        embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True)
        return self.mlp(embed).squeeze(1)
def aesthetic_scorer(hub_model_id, model_filename):
    """
    Build the DDPO reward function: a callable mapping (images, prompts, metadata) to
    (aesthetic scores, empty metadata dict), backed by a pretrained `AestheticScorer`.
    """
    model = AestheticScorer(
        model_id=hub_model_id,
        model_filename=model_filename,
        dtype=torch.float32,
    )
    # Move the scorer to whichever accelerator is available (NPU > XPU > CUDA).
    if is_torch_npu_available():
        model = model.npu()
    elif is_torch_xpu_available():
        model = model.xpu()
    else:
        model = model.cuda()

    def _score(images, prompts, metadata):
        # Images arrive in [0, 1]; convert to uint8 pixel values for the CLIP processor.
        pixel_values = (images * 255).round().clamp(0, 255).to(torch.uint8)
        return model(pixel_values), {}

    return _score
# List of example animal prompts to feed Stable Diffusion; `prompt_fn` samples uniformly from it.
animals = [
    "cat",
    "dog",
    "horse",
    "monkey",
    "rabbit",
    "zebra",
    "spider",
    "bird",
    "sheep",
    "deer",
    "cow",
    "goat",
    "lion",
    "frog",
    "chicken",
    "duck",
    "goose",
    "bee",
    "pig",
    "turkey",
    "fly",
    "llama",
    "camel",
    "bat",
    "gorilla",
    "hedgehog",
    "kangaroo",
]
def prompt_fn():
    """Sample a random animal name as the generation prompt; metadata is always empty."""
    chosen = np.random.choice(animals)
    return chosen, {}
def image_outputs_logger(image_data, global_step, accelerate_logger):
    """Log the final batch of sampled images to the tracker.

    Each image is keyed by its (truncated) prompt and reward so that
    related samples are easy to compare in the logging UI.
    """
    # Only the last batch is logged to keep the volume of tracked images small.
    images, prompts, _, rewards, _ = image_data[-1]
    to_log = {}
    for idx, img in enumerate(images):
        caption = f"{prompts[idx]:.25} | {rewards[idx].item():.2f}"
        to_log[caption] = img.unsqueeze(0).float()
    accelerate_logger.log_images(
        to_log,
        step=global_step,
    )
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, DDPOConfig))
script_args, training_args = parser.parse_args_into_dataclasses()
training_args.project_kwargs = {
"logging_dir": "./logs",
"automatic_checkpoint_naming": True,
"total_limit": 5,
"project_dir": "./save",
}
pipeline = DefaultDDPOStableDiffusionPipeline(
script_args.pretrained_model,
pretrained_model_revision=script_args.pretrained_revision,
use_lora=script_args.use_lora,
)
trainer = DDPOTrainer(
training_args,
aesthetic_scorer(script_args.hf_hub_aesthetic_model_id, script_args.hf_hub_aesthetic_model_filename),
prompt_fn,
pipeline,
image_samples_hook=image_outputs_logger,
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
| trl/examples/scripts/ddpo.py/0 | {
"file_path": "trl/examples/scripts/ddpo.py",
"repo_id": "trl",
"token_count": 3209
} | 595 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# ]
# ///
"""
Full training:
python examples/scripts/reward_modeling.py \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--dataset_name trl-lib/ultrafeedback_binarized \
--output_dir Qwen2-0.5B-Reward \
--per_device_train_batch_size 8 \
--num_train_epochs 1 \
--gradient_checkpointing True \
--learning_rate 1.0e-5 \
--eval_strategy steps \
--eval_steps 50 \
--max_length 2048
LoRA:
python examples/scripts/reward_modeling.py \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--dataset_name trl-lib/ultrafeedback_binarized \
--output_dir Qwen2-0.5B-Reward-LoRA \
--per_device_train_batch_size 8 \
--num_train_epochs 1 \
--gradient_checkpointing True \
--learning_rate 1.0e-4 \
--eval_strategy steps \
--eval_steps 50 \
--max_length 2048 \
--use_peft \
--lora_r 32 \
--lora_alpha 16
"""
import torch
from accelerate import logging
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser
from trl import (
ModelConfig,
RewardConfig,
RewardTrainer,
ScriptArguments,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
setup_chat_format,
)
logger = logging.get_logger(__name__)
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, RewardConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_into_dataclasses()
training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False)
################
# Model & Tokenizer
################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
use_cache=False if training_args.gradient_checkpointing else True,
torch_dtype=torch_dtype,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, use_fast=True
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, num_labels=1, trust_remote_code=model_args.trust_remote_code, **model_kwargs
)
# Align padding tokens between tokenizer and model
model.config.pad_token_id = tokenizer.pad_token_id
# If post-training a base model, use ChatML as the default template
if tokenizer.chat_template is None:
model, tokenizer = setup_chat_format(model, tokenizer)
if model_args.use_peft and model_args.lora_task_type != "SEQ_CLS":
logger.warning(
"You are using a `task_type` that is different than `SEQ_CLS` for PEFT. This will lead to silent bugs"
" Make sure to pass --lora_task_type SEQ_CLS when using this script with PEFT.",
)
##############
# Load dataset
##############
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
##########
# Training
##########
trainer = RewardTrainer(
model=model,
processing_class=tokenizer,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
peft_config=get_peft_config(model_args),
)
trainer.train()
############################
# Save model and push to Hub
############################
trainer.save_model(training_args.output_dir)
if training_args.eval_strategy != "no":
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
| trl/examples/scripts/reward_modeling.py/0 | {
"file_path": "trl/examples/scripts/reward_modeling.py",
"repo_id": "trl",
"token_count": 1851
} | 596 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from datasets import Dataset
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
    r"""
    Command-line arguments for the dataset-generation script.

    Args:
        test_size (`float`, *optional*, defaults to `0.1`):
            Fraction of the dataset to include in the test split.
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether to push the dataset to the Hugging Face Hub.
        repo_id (`str`, *optional*, defaults to `"trl-internal-testing/zen"`):
            Hugging Face repository ID to push the dataset to.
    """

    test_size: float = field(default=0.1, metadata={"help": "Fraction of the dataset to include in the test split."})
    push_to_hub: bool = field(default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."})
    repo_id: str = field(default="trl-internal-testing/zen", metadata={"help": "Hugging Face repository ID to push the dataset to."})
def main(test_size, push_to_hub, repo_id):
# fmt: off
standard_language_modeling_dataset = Dataset.from_dict({
"text": [
"Beautiful is better than ugly.",
"Explicit is better than implicit.",
"Simple is better than complex.",
"Complex is better than complicated.",
"Flat is better than nested.",
"Sparse is better than dense.",
"Readability counts.",
"Special cases aren't special enough to break the rules.",
"Although practicality beats purity.",
"Errors should never pass silently.",
"Unless explicitly silenced.",
"In the face of ambiguity, refuse the temptation to guess.",
"There should be one-- and preferably only one --obvious way to do it.",
"Although that way may not be obvious at first unless you're Dutch.",
"Now is better than never.",
"Although never is often better than *right* now.",
"If the implementation is hard to explain, it's a bad idea.",
"If the implementation is easy to explain, it may be a good idea.",
"Namespaces are one honking great idea -- let's do more of those!",
],
})
standard_language_modeling_dataset = standard_language_modeling_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_language_modeling_dataset.push_to_hub(repo_id, config_name="standard_language_modeling")
standard_prompt_only_dataset = Dataset.from_dict({
"prompt": [
"Beautiful is better than",
"Explicit is",
"Simple is better",
"Complex",
"Flat is better than",
"Sparse is better",
"Readability",
"Special cases aren't special",
"Although practicality beats",
"Errors should never",
"Unless explicitly",
"In the face of ambiguity, refuse",
"There should be one-- and preferably",
"Although that way may not be obvious at first unless you're",
"Now is",
"Although never is often",
"If the implementation is hard to explain,",
"If the implementation is easy",
"Namespaces are one honking great",
],
})
standard_prompt_only_dataset = standard_prompt_only_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_prompt_only_dataset.push_to_hub(repo_id, config_name="standard_prompt_only")
standard_prompt_completion_dataset = Dataset.from_dict({
"prompt": [
"Beautiful is better than",
"Explicit is",
"Simple is better",
"Complex",
"Flat is better than",
"Sparse is better",
"Readability",
"Special cases aren't special",
"Although practicality beats",
"Errors should never",
"Unless explicitly",
"In the face of ambiguity, refuse",
"There should be one-- and preferably",
"Although that way may not be obvious at first unless you're",
"Now is",
"Although never is often",
"If the implementation is hard to explain,",
"If the implementation is easy",
"Namespaces are one honking great",
],
"completion": [
" ugly.",
" better than implicit.",
" than complex.",
" is better than complicated.",
" nested.",
" than dense.",
" counts.",
" enough to break the rules.",
" purity.",
" pass silently.",
" silenced.",
" the temptation to guess.",
" only one --obvious way to do it.",
" Dutch.",
" better than never.",
" better than *right* now.",
" it's a bad idea.",
" to explain, it may be a good idea.",
" idea -- let's do more of those!",
],
})
standard_prompt_completion_dataset = standard_prompt_completion_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_prompt_completion_dataset.push_to_hub(repo_id, config_name="standard_prompt_completion")
standard_preference_dataset = Dataset.from_dict({
"prompt": [
"Beautiful is better than",
"Explicit is",
"Simple is better",
"Complex",
"Flat is better than",
"Sparse is better",
"Readability",
"Special cases aren't special",
"Although practicality beats",
"Errors should never",
"Unless explicitly",
"In the face of ambiguity, refuse",
"There should be one-- and preferably",
"Although that way may not be obvious at first unless you're",
"Now is",
"Although never is often",
"If the implementation is hard to explain,",
"If the implementation is easy",
"Namespaces are one honking great",
],
"chosen": [
" ugly.",
" better than implicit.",
" than complex.",
" is better than complicated.",
" nested.",
" than dense.",
" counts.",
" enough to break the rules.",
" purity.",
" pass silently.",
" silenced.",
" the temptation to guess.",
" only one --obvious way to do it.",
" Dutch.",
" better than never.",
" better than *right* now.",
" it's a bad idea.",
" to explain, it may be a good idea.",
" idea -- let's do more of those!",
],
"rejected": [
" the moon.",
" worse than nothing.",
" than a long vacation.",
" is always the answer.",
" chocolate.",
" without any context.",
" is optional.",
" enough to become unicorns.",
" reality.",
" pass their driving test.",
" forgotten.",
" the opportunity to laugh.",
" two or more confusing methods.",
" a time traveler.",
" never better.",
" not even a possibility.",
" it's clearly the best choice.",
" it's probably magic.",
" watermelon -- let's plant some!",
],
})
standard_preference_dataset = standard_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_preference_dataset.push_to_hub(repo_id, config_name="standard_preference")
standard_implicit_prompt_preference_dataset = Dataset.from_dict({
"chosen": [
"Beautiful is better than ugly.",
"Explicit is better than implicit.",
"Simple is better than complex.",
"Complex is better than complicated.",
"Flat is better than nested.",
"Sparse is better than dense.",
"Readability counts.",
"Special cases aren't special enough to break the rules.",
"Although practicality beats purity.",
"Errors should never pass silently.",
"Unless explicitly silenced.",
"In the face of ambiguity, refuse the temptation to guess.",
"There should be one-- and preferably only one --obvious way to do it.",
"Although that way may not be obvious at first unless you're Dutch.",
"Now is better than never.",
"Although never is often better than *right* now.",
"If the implementation is hard to explain, it's a bad idea.",
"If the implementation is easy to explain, it may be a good idea.",
"Namespaces are one honking great idea -- let's do more of those!",
],
"rejected": [
"Beautiful is better than the moon.",
"Explicit is worse than nothing.",
"Simple is better than a long vacation.",
"Complex is always the answer.",
"Flat is better than chocolate.",
"Sparse is better without any context.",
"Readability is optional.",
"Special cases aren't special enough to become unicorns.",
"Although practicality beats reality.",
"Errors should never pass their driving test.",
"Unless explicitly forgotten.",
"In the face of ambiguity, refuse the opportunity to laugh.",
"There should be one-- and preferably two or more confusing methods.",
"Although that way may not be obvious at first unless you're a time traveler.",
"Now is never better.",
"Although never is often not even a possibility.",
"If the implementation is hard to explain, it's clearly the best choice.",
"If the implementation is easy it's probably magic.",
"Namespaces are one honking great watermelon -- let's plant some!",
],
})
standard_implicit_prompt_preference_dataset = standard_implicit_prompt_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="standard_implicit_prompt_preference")
standard_unpaired_preference_dataset = Dataset.from_dict({
"prompt": [
"Beautiful is better than",
"Explicit is",
"Simple is better",
"Complex",
"Flat is better than",
"Sparse is better",
"Readability",
"Special cases aren't special",
"Although practicality beats",
"Errors should never",
"Unless explicitly",
"In the face of ambiguity, refuse",
"There should be one-- and preferably",
"Although that way may not be obvious at first unless you're",
"Now is",
"Although never is often",
"If the implementation is hard to explain,",
"If the implementation is easy",
"Namespaces are one honking great",
],
"completion": [
" ugly.",
" worse than nothing.",
" than a long vacation.",
" is better than complicated.",
" nested.",
" without any context.",
" counts.",
" enough to become unicorns.",
" purity.",
" pass silently.",
" forgotten.",
" the temptation to guess.",
" only one --obvious way to do it.",
" a time traveler.",
" better than never.",
" not even a possibility.",
" it's a bad idea.",
" it's probably magic.",
" watermelon -- let's plant some!",
],
"label": [True, False, False, True, True, False, True, False, True, True, False, True, True, False, True, False, True, False, False],
})
standard_unpaired_preference_dataset = standard_unpaired_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_unpaired_preference_dataset.push_to_hub(repo_id, config_name="standard_unpaired_preference")
standard_stepwise_supervision_dataset = Dataset.from_dict({
"prompt": [
"Beautiful is better than",
"Explicit is better than",
"Simple is better than",
"Complex is better than",
"Flat is better than",
"Sparse is better than",
"Readability counts",
"Special cases aren't special enough",
"Although practicality beats",
"Errors should never pass",
"In the face of ambiguity, refuse",
"There should be one-- and preferably only one --",
"Although that way may not be",
"Now is better than",
"Never is often better than",
"If the implementation is hard to explain, it's",
"If the implementation is easy to explain, it",
"Namespaces are one",
"Although practicality sometimes beats purity,",
],
"completions":[
[", let me think...", " ugly."],
[", of course,", " implicit.", " because clarity matters."],
["... let's keep it basic,", " complex."],
[" when needed,", " complicated."],
[" in terms of structure,", " nested."],
["... especially for readability."],
[" especially when others read it."],
[", unless...", " they follow the rules."],
[" some theoretical elegance,", " purity."],
[" silently,", " unless explicitly silenced."],
[" the temptation to guess."],
[" way to do it,"," but sometimes it's not obvious.", " especially when there's more than one possibility."],
[" clear at first,", " it will eventually emerge."],
[" later."],
[" problematic fixes."],
[" likely because it's too complicated."],
[" might be a good design."],
[" of those great ideas,", " that solve many problems."],
[" the code should still aim for balance."],
],
"labels": [
[False, True],
[False, True, False],
[False, True],
[True, True],
[True, False],
[True],
[False],
[True, False],
[False, False],
[False, False],
[True],
[True, True, False],
[True, True],
[False],
[True], [False],
[False],
[True, True],
[False]
]
})
standard_stepwise_supervision_dataset = standard_stepwise_supervision_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
standard_stepwise_supervision_dataset.push_to_hub(repo_id, config_name="standard_stepwise_supervision")
conversational_language_modeling_dataset = Dataset.from_dict({
"messages": [
[{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."},],
[{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}],
[{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}],
[{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}],
[{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}],
[{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}],
[{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}],
[{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}],
[{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}],
[{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}],
[{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "Dutch."}],
[{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}],
[{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "Yes, often."}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}],
[{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}],
],
})
conversational_language_modeling_dataset = conversational_language_modeling_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_language_modeling_dataset.push_to_hub(repo_id, config_name="conversational_language_modeling")
conversational_prompt_only_dataset = Dataset.from_dict({
"prompt": [
[{"role": "user", "content": "What is better than ugly?"}],
[{"role": "user", "content": "What is better than implicit?"}],
[{"role": "user", "content": "What is better than complex?"}],
[{"role": "user", "content": "What is better than complicated?"}],
[{"role": "user", "content": "What is better than nested?"}],
[{"role": "user", "content": "What is better than dense?"}],
[{"role": "user", "content": "What counts?"}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}],
[{"role": "user", "content": "What beats purity?"}],
[{"role": "user", "content": "What should never pass silently?"}],
[{"role": "user", "content": "When can errors pass silently?"}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}],
[{"role": "user", "content": "How many ways should there be to do it?"}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}],
[{"role": "user", "content": "What is better than never?"}],
[{"role": "user", "content": "Is never better than *right* now?"}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}],
[{"role": "user", "content": "Any great ideas?"}],
],
})
conversational_prompt_only_dataset = conversational_prompt_only_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_prompt_only_dataset.push_to_hub(repo_id, config_name="conversational_prompt_only")
conversational_prompt_completion_dataset = Dataset.from_dict({
"prompt": [
[{"role": "user", "content": "What is better than ugly?"}],
[{"role": "user", "content": "What is better than implicit?"}],
[{"role": "user", "content": "What is better than complex?"}],
[{"role": "user", "content": "What is better than complicated?"}],
[{"role": "user", "content": "What is better than nested?"}],
[{"role": "user", "content": "What is better than dense?"}],
[{"role": "user", "content": "What counts?"}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}],
[{"role": "user", "content": "What beats purity?"}],
[{"role": "user", "content": "What should never pass silently?"}],
[{"role": "user", "content": "When can errors pass silently?"}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}],
[{"role": "user", "content": "How many ways should there be to do it?"}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}],
[{"role": "user", "content": "What is better than never?"}],
[{"role": "user", "content": "Is never better than *right* now?"}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}],
[{"role": "user", "content": "Any great ideas?"}],
],
"completion": [
[{"role": "assistant", "content": "Beautiful."}],
[{"role": "assistant", "content": "Explicit."}],
[{"role": "assistant", "content": "Simple."}],
[{"role": "assistant", "content": "Complex."}],
[{"role": "assistant", "content": "Flat."}],
[{"role": "assistant", "content": "Sparse."}],
[{"role": "assistant", "content": "Readability."}],
[{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}],
[{"role": "assistant", "content": "Practicality."}],
[{"role": "assistant", "content": "Errors."}],
[{"role": "assistant", "content": "When explicitly silenced."}],
[{"role": "assistant", "content": "Refuse the temptation to guess."}],
[{"role": "assistant", "content": "One, and preferably only one."}],
[{"role": "assistant", "content": "Dutch."}],
[{"role": "assistant", "content": "Now is better than never."}],
[{"role": "assistant", "content": "Yes, often."}],
[{"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "assistant", "content": "It means it may be a good idea."}],
[{"role": "assistant", "content": "Namespaces are one honking great idea."}],
],
})
conversational_prompt_completion_dataset = conversational_prompt_completion_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_prompt_completion_dataset.push_to_hub(repo_id, config_name="conversational_prompt_completion")
conversational_preference_dataset = Dataset.from_dict({
"prompt": [
[{"role": "user", "content": "What is better than ugly?"}],
[{"role": "user", "content": "What is better than implicit?"}],
[{"role": "user", "content": "What is better than complex?"}],
[{"role": "user", "content": "What is better than complicated?"}],
[{"role": "user", "content": "What is better than nested?"}],
[{"role": "user", "content": "What is better than dense?"}],
[{"role": "user", "content": "What counts?"}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}],
[{"role": "user", "content": "What beats purity?"}],
[{"role": "user", "content": "What should never pass silently?"}],
[{"role": "user", "content": "When can errors pass silently?"}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}],
[{"role": "user", "content": "How many ways should there be to do it?"}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}],
[{"role": "user", "content": "What is better than never?"}],
[{"role": "user", "content": "Is never better than *right* now?"}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}],
[{"role": "user", "content": "Any great ideas?"}],
],
"chosen": [
[{"role": "assistant", "content": "Beautiful."}],
[{"role": "assistant", "content": "Explicit."}],
[{"role": "assistant", "content": "Simple."}],
[{"role": "assistant", "content": "Complex."}],
[{"role": "assistant", "content": "Flat."}],
[{"role": "assistant", "content": "Sparse."}],
[{"role": "assistant", "content": "Readability."}],
[{"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}],
[{"role": "assistant", "content": "Practicality."}],
[{"role": "assistant", "content": "Errors."}],
[{"role": "assistant", "content": "When explicitly silenced."}],
[{"role": "assistant", "content": "Refuse the temptation to guess."}],
[{"role": "assistant", "content": "One, and preferably only one."}],
[{"role": "assistant", "content": "Dutch."}],
[{"role": "assistant", "content": "Now is better than never."}],
[{"role": "assistant", "content": "Yes, often."}],
[{"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "assistant", "content": "It means it may be a good idea."}],
[{"role": "assistant", "content": "Namespaces are one honking great idea."}],
],
"rejected": [
[{"role": "assistant", "content": "Acceptable."}],
[{"role": "assistant", "content": "Explained."}],
[{"role": "assistant", "content": "Very complex."}],
[{"role": "assistant", "content": "Very complicated."}],
[{"role": "assistant", "content": "Circular."}],
[{"role": "assistant", "content": "Heavy."}],
[{"role": "assistant", "content": "Looking complicated."}],
[{"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}],
[{"role": "assistant", "content": "Nothing."}],
[{"role": "assistant", "content": "Warnings."}],
[{"role": "assistant", "content": "Never."}],
[{"role": "assistant", "content": "Give up."}],
[{"role": "assistant", "content": "As many as possible."}],
[{"role": "assistant", "content": "French."}],
[{"role": "assistant", "content": "Some day."}],
[{"role": "assistant", "content": "No, never."}],
[{"role": "assistant", "content": "It means it's a good idea."}],
[{"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "assistant", "content": "Recursion."}],
],
})
conversational_preference_dataset = conversational_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_preference_dataset.push_to_hub(repo_id, config_name="conversational_preference")
conversational_implicit_prompt_preference_dataset = Dataset.from_dict({
"chosen": [
[{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Beautiful."}],
[{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explicit."}],
[{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Simple."}],
[{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Complex."}],
[{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Flat."}],
[{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Sparse."}],
[{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Readability."}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "No, special cases aren't special enough to break the rules."}],
[{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Practicality."}],
[{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Errors."}],
[{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "When explicitly silenced."}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Refuse the temptation to guess."}],
[{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "One, and preferably only one."}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "Dutch."}],
[{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Now is better than never."}],
[{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "Yes, often."}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it may be a good idea."}],
[{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Namespaces are one honking great idea."}],
],
"rejected": [
[{"role": "user", "content": "What is better than ugly?"}, {"role": "assistant", "content": "Acceptable."}],
[{"role": "user", "content": "What is better than implicit?"}, {"role": "assistant", "content": "Explained."}],
[{"role": "user", "content": "What is better than complex?"}, {"role": "assistant", "content": "Very complex."}],
[{"role": "user", "content": "What is better than complicated?"}, {"role": "assistant", "content": "Very complicated."}],
[{"role": "user", "content": "What is better than nested?"}, {"role": "assistant", "content": "Circular."}],
[{"role": "user", "content": "What is better than dense?"}, {"role": "assistant", "content": "Heavy."}],
[{"role": "user", "content": "What counts?"}, {"role": "assistant", "content": "Looking complicated."}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}, {"role": "assistant", "content": "Yes, special cases are special enough to break the rules."}],
[{"role": "user", "content": "What beats purity?"}, {"role": "assistant", "content": "Nothing."}],
[{"role": "user", "content": "What should never pass silently?"}, {"role": "assistant", "content": "Warnings."}],
[{"role": "user", "content": "When can errors pass silently?"}, {"role": "assistant", "content": "Never."}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}, {"role": "assistant", "content": "Give up."}],
[{"role": "user", "content": "How many ways should there be to do it?"}, {"role": "assistant", "content": "As many as possible."}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}, {"role": "assistant", "content": "French."}],
[{"role": "user", "content": "What is better than never?"}, {"role": "assistant", "content": "Some day."}],
[{"role": "user", "content": "Is never better than *right* now?"}, {"role": "assistant", "content": "No, never."}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}, {"role": "assistant", "content": "It means it's a good idea."}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}, {"role": "assistant", "content": "It means it's a bad idea."}],
[{"role": "user", "content": "Any great ideas?"}, {"role": "assistant", "content": "Recursion."}],
],
})
conversational_implicit_prompt_preference_dataset = conversational_implicit_prompt_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_implicit_prompt_preference_dataset.push_to_hub(repo_id, config_name="conversational_implicit_prompt_preference")
conversational_unpaired_preference_dataset = Dataset.from_dict({
"prompt": [
[{"role": "user", "content": "What is better than ugly?"}],
[{"role": "user", "content": "What is better than implicit?"}],
[{"role": "user", "content": "What is better than complex?"}],
[{"role": "user", "content": "What is better than complicated?"}],
[{"role": "user", "content": "What is better than nested?"}],
[{"role": "user", "content": "What is better than dense?"}],
[{"role": "user", "content": "What counts?"}],
[{"role": "user", "content": "Are special cases enough to break the rules?"}],
[{"role": "user", "content": "What beats purity?"}],
[{"role": "user", "content": "What should never pass silently?"}],
[{"role": "user", "content": "When can errors pass silently?"}],
[{"role": "user", "content": "What should you do in the face of ambiguity?"}],
[{"role": "user", "content": "How many ways should there be to do it?"}],
[{"role": "user", "content": "For whom may the way not be obvious at first?"}],
[{"role": "user", "content": "What is better than never?"}],
[{"role": "user", "content": "Is never better than *right* now?"}],
[{"role": "user", "content": "What does it mean if the implementation is hard to explain?"}],
[{"role": "user", "content": "What does it mean if the implementation is easy to explain?"}],
[{"role": "user", "content": "Any great ideas?"}],
],
"completion": [
[{'role': 'assistant', 'content': 'Beautiful.'}],
[{'role': 'assistant', 'content': 'Explicit.'}],
[{'role': 'assistant', 'content': 'Simple.'}],
[{'role': 'assistant', 'content': 'Very complicated.'}],
[{'role': 'assistant', 'content': 'Flat.'}],
[{'role': 'assistant', 'content': 'Sparse.'}],
[{'role': 'assistant', 'content': 'Readability.'}],
[{'role': 'assistant', 'content': 'Yes, special cases are special enough to break the rules.'}],
[{'role': 'assistant', 'content': 'Practicality.'}],
[{'role': 'assistant', 'content': 'Warnings.'}],
[{'role': 'assistant', 'content': 'When explicitly silenced.'}],
[{'role': 'assistant', 'content': 'Give up.'}],
[{'role': 'assistant', 'content': 'One, and preferably only one.'}],
[{'role': 'assistant', 'content': 'French.'}],
[{'role': 'assistant', 'content': 'Some day.'}],
[{'role': 'assistant', 'content': 'Yes, often.'}],
[{'role': 'assistant', 'content': "It means it's a bad idea."}],
[{'role': 'assistant', 'content': 'It means it may be a good idea.'}],
[{'role': 'assistant', 'content': 'Namespaces are one honking great idea.'}],
],
"label": [True, True, True, False, True, True, True, False, True, False, True, False, True, False, False, True, True, True, True],
})
conversational_unpaired_preference_dataset = conversational_unpaired_preference_dataset.train_test_split(test_size=test_size, shuffle=False)
if push_to_hub:
conversational_unpaired_preference_dataset.push_to_hub(repo_id, config_name="conversational_unpaired_preference")
# fmt: on
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
main(script_args.test_size, script_args.push_to_hub, script_args.repo_id)
| trl/scripts/generate_zen_dataset.py/0 | {
"file_path": "trl/scripts/generate_zen_dataset.py",
"repo_id": "trl",
"token_count": 15761
} | 597 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
from accelerate import Accelerator
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from transformers.testing_utils import require_peft
from transformers.utils import is_peft_available
from trl import BCOConfig, BCOTrainer
from trl.trainer.bco_trainer import _process_tokens, _tokenize
from .testing_utils import TrlTestCase, require_no_wandb, require_sklearn
if is_peft_available():
from peft import LoraConfig
class BCOTrainerTester(TrlTestCase):
    """Test suite for `BCOTrainer`.

    Every test builds a tiny Qwen2 causal LM, runs a short training loop on the
    "trl-internal-testing/zen" datasets, and asserts that a train loss is logged
    and/or that the trainable parameters actually changed.
    """

    @parameterized.expand(
        [
            ("standard_preference",),
            ("standard_implicit_prompt_preference",),
            ("standard_unpaired_preference",),
            ("conversational_preference",),
            ("conversational_implicit_prompt_preference",),
            ("conversational_unpaired_preference",),
        ]
    )
    @require_sklearn
    def test_train(self, config_name):
        """Training runs end-to-end on every supported dataset format and updates the weights."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        ref_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", config_name, split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            learning_rate=0.1,  # increase the learning rate to speed up the test
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
        )
        # Snapshot the weights before training so we can verify they were updated.
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param.cpu(), new_param.cpu()))

    @require_sklearn
    def test_train_with_precompute(self):
        """Training works when reference log-probs are precomputed (`precompute_ref_log_probs=True`)."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        ref_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            learning_rate=0.1,  # increase the learning rate to speed up the test
            precompute_ref_log_probs=True,
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param.cpu(), new_param.cpu()))

    @require_sklearn
    def test_train_eval(self):
        """Periodic evaluation (`eval_strategy="steps"`) runs without error during training."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        ref_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            eval_strategy="steps",
            eval_steps=3,
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset["train"],
            eval_dataset=dataset["test"],
        )
        trainer.train()

    @require_sklearn
    def test_init_with_ref_model_is_model(self):
        """Passing the same object as `model` and `ref_model` must raise a ValueError."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            report_to="none",
        )
        with self.assertRaises(ValueError):
            BCOTrainer(
                model=model,
                ref_model=model,  # ref_model can't be the same as model
                args=training_args,
                processing_class=tokenizer,
                train_dataset=dataset,
            )

    @require_sklearn
    def test_tokenize_and_process_tokens(self):
        """`_tokenize` and `_process_tokens` produce the expected ids, masks, and labels."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        ref_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
        )
        # Tokenization keeps the original columns and adds prompt/answer ids and masks.
        tokenized_dataset = dataset.map(
            _tokenize,
            fn_kwargs={"tokenizer": trainer.tokenizer},
            batched=True,
            batch_size=2,
        )
        self.assertListEqual(tokenized_dataset["prompt"][:], dataset["prompt"][:])
        self.assertListEqual(tokenized_dataset["completion"][:], dataset["completion"][:])
        self.assertListEqual(tokenized_dataset["label"][:], dataset["label"][:])
        self.assertListEqual(tokenized_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
        self.assertListEqual(tokenized_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
        self.assertListEqual(tokenized_dataset["answer_input_ids"][0], [27261, 13])
        self.assertListEqual(tokenized_dataset["answer_attention_mask"][0], [1, 1])
        # Processing concatenates prompt and answer and masks prompt tokens in the labels.
        fn_kwargs = {
            "prefix": "",
            "is_encoder_decoder": trainer.is_encoder_decoder,
            "tokenizer": trainer.tokenizer,
            "max_length": trainer.max_length,
            "truncation_mode": trainer.truncation_mode,
            "label_pad_token_id": trainer.label_pad_token_id,
            "max_prompt_length": trainer.max_prompt_length,
        }
        processed_dataset = tokenized_dataset.map(_process_tokens, fn_kwargs=fn_kwargs)
        self.assertListEqual(processed_dataset["prompt"][:], dataset["prompt"][:])
        self.assertListEqual(processed_dataset["completion"][:], dataset["completion"][:])
        self.assertListEqual(processed_dataset["label"][:], dataset["label"][:])
        self.assertListEqual(processed_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
        self.assertListEqual(processed_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
        self.assertListEqual(processed_dataset["completion_input_ids"][0], [46518, 374, 2664, 1091, 27261, 13, 151645])
        self.assertListEqual(processed_dataset["completion_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1])
        self.assertListEqual(processed_dataset["completion_labels"][0], [-100, -100, -100, -100, 27261, 13, 151645])

    @require_sklearn
    def test_train_without_providing_ref_model(self):
        """Training works with an implicit reference model (`ref_model` omitted)."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            learning_rate=0.1,  # increase the learning rate to speed up the test
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param.cpu(), new_param.cpu()))

    @require_sklearn
    def test_train_udm(self):
        """Training works with an underlying-distribution-matching (UDM) embedding function."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # Get embedding model
        embedding_model_id = "trl-internal-testing/tiny-BartModel"
        embedding_model = AutoModel.from_pretrained(embedding_model_id)
        embedding_tokenizer = AutoTokenizer.from_pretrained(embedding_model_id)

        def embed_prompt(input_ids, attention_mask, model):
            # Mean-pool the encoder's last hidden state as the prompt embedding.
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            return outputs.last_hidden_state.mean(dim=1)

        embedding_model = Accelerator().prepare_model(embedding_model)
        embedding_func = partial(embed_prompt, model=embedding_model)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            learning_rate=0.1,  # increase the learning rate to speed up the test
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
            embedding_func=embedding_func,
            embedding_tokenizer=embedding_tokenizer,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param.cpu(), new_param.cpu()))

    @require_sklearn
    @require_peft
    def test_train_without_providing_ref_model_with_lora(self):
        """Training with a LoRA adapter (no explicit ref model) updates the LoRA weights."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference", split="train")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            learning_rate=0.1,  # increase the learning rate to speed up the test
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset,
            peft_config=lora_config,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            # Only the LoRA adapter weights are trainable; base weights stay frozen.
            if "lora" in n:
                new_param = trainer.model.get_parameter(n)
                if param.sum() != 0:  # ignore 0 biases
                    self.assertFalse(torch.equal(param.cpu(), new_param.cpu()))

    @require_sklearn
    @require_no_wandb
    def test_generate_during_eval_no_wandb(self):
        """`generate_during_eval=True` without wandb/comet installed must raise a ValueError."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            eval_strategy="steps",
            eval_steps=3,
            generate_during_eval=True,
            report_to="none",
        )
        with self.assertRaisesRegex(
            ValueError,
            expected_regex="`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
            " Please install `wandb` or `comet-ml` to resolve.",
        ):
            BCOTrainer(
                model=model,
                args=training_args,
                processing_class=tokenizer,
                train_dataset=dataset["train"],
                eval_dataset=dataset["test"],
            )

    @require_sklearn
    @require_peft
    def test_lora_train_and_save(self):
        """A LoRA-trained model saves an adapter that can be reloaded from disk."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset["train"],
            peft_config=lora_config,
        )
        # train the model
        trainer.train()
        # save peft adapter
        trainer.save_model()
        # assert that the model is loaded without giving OSError
        AutoModelForCausalLM.from_pretrained(self.tmp_dir)

    @require_sklearn
    def test_compute_metrics(self):
        """A custom `compute_metrics` callable is invoked and its values appear in the eval logs."""
        model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        ref_model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")

        def dummy_compute_metrics(*args, **kwargs):
            # Constant metric: we only check it is routed into the logs.
            return {"test": 0.0}

        training_args = BCOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,  # warning raised if not set to False
            eval_strategy="steps",
            eval_steps=3,
            report_to="none",
        )
        trainer = BCOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dataset["train"],
            eval_dataset=dataset["test"],
            compute_metrics=dummy_compute_metrics,
        )
        trainer.train()
        self.assertEqual(trainer.state.log_history[-2]["eval_test"], 0.0)
| trl/tests/test_bco_trainer.py/0 | {
"file_path": "trl/tests/test_bco_trainer.py",
"repo_id": "trl",
"token_count": 7812
} | 598 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import require_liger_kernel, require_peft
from trl import KTOConfig, KTOTrainer
from trl.trainer.kto_trainer import _get_kl_dataset, _process_tokens, _tokenize
from .testing_utils import TrlTestCase, require_no_wandb
class KTOTrainerTester(TrlTestCase):
    """Test suite for `KTOTrainer`.

    `setUp` builds tiny Qwen2 (decoder-only) and T5 (encoder-decoder) models; the
    tests run short training loops on the "trl-internal-testing/zen" datasets and
    assert that losses are logged and trainable parameters change.
    """

    def setUp(self):
        super().setUp()
        self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
        self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
        self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        self.tokenizer.pad_token = self.tokenizer.eos_token
        # get t5 as seq2seq example:
        model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration"
        self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)

    @parameterized.expand(
        [
            ("qwen", "standard_preference", "kto", True, True),
            # ("t5", "standard_implicit_prompt_preference", "kto", True, False), # KTO broken for enc-dec
            ("qwen", "standard_unpaired_preference", "kto", False, True),
            # ("t5", "conversational_preference", "kto", False, False),
            ("qwen", "conversational_implicit_prompt_preference", "apo_zero_unpaired", True, True),
            # ("t5", "conversational_unpaired_preference", "apo_zero_unpaired", True, False),
            ("qwen", "standard_unpaired_preference", "apo_zero_unpaired", False, True),
            # ("t5", "conversational_unpaired_preference", "apo_zero_unpaired", False, False),
        ]
    )
    def test_kto_trainer(self, name, config_name, loss_type, pre_compute, eval_dataset):
        """Training runs across dataset formats, loss types, and precompute settings, and updates weights."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=1,
            learning_rate=9e-1,
            eval_strategy="steps" if eval_dataset else "no",
            beta=0.1,
            precompute_ref_log_probs=pre_compute,
            loss_type=loss_type,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
        # Pick the model family under test (decoder-only Qwen or seq2seq T5).
        if name == "qwen":
            model = self.model
            ref_model = self.ref_model
            tokenizer = self.tokenizer
        elif name == "t5":
            model = self.t5_model
            ref_model = self.t5_ref_model
            tokenizer = self.t5_tokenizer
        trainer = KTOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"] if eval_dataset else None,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param, new_param))

    def test_kto_trainer_with_ref_model_is_model(self):
        """Passing the same object as `model` and `ref_model` must raise a ValueError."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        with self.assertRaises(ValueError):
            KTOTrainer(
                model=self.model,
                ref_model=self.model,  # ref_model can't be the same as model
                args=training_args,
                processing_class=self.tokenizer,
                train_dataset=dummy_dataset["train"],
            )

    def test_tokenize_and_process_tokens(self):
        """`_tokenize`, `_get_kl_dataset`, and `_process_tokens` produce the expected outputs."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=1,
            learning_rate=9e-1,
            eval_strategy="steps",
            beta=0.1,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        trainer = KTOTrainer(
            model=self.model,
            ref_model=self.ref_model,
            args=training_args,
            processing_class=self.tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"],
        )
        train_dataset = dummy_dataset["train"]
        # Tokenization keeps the original columns and adds prompt/answer ids and masks.
        tokenized_dataset = train_dataset.map(
            _tokenize,
            fn_kwargs={"tokenizer": trainer.tokenizer},
            batched=True,
            batch_size=2,
        )
        self.assertListEqual(tokenized_dataset["prompt"][:], train_dataset["prompt"][:])
        self.assertListEqual(tokenized_dataset["completion"][:], train_dataset["completion"][:])
        self.assertListEqual(tokenized_dataset["label"][:], train_dataset["label"][:])
        self.assertListEqual(tokenized_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
        self.assertListEqual(tokenized_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
        self.assertListEqual(tokenized_dataset["answer_input_ids"][0], [27261, 13])
        self.assertListEqual(tokenized_dataset["answer_attention_mask"][0], [1, 1])
        # Test corruption of (prompt, completion) pairs for KL dataset
        for batch_size in [2, 3]:
            tokenized_kl_dataset = tokenized_dataset.map(_get_kl_dataset, batched=True, batch_size=batch_size)
            # Verify that the "answer_input_ids" have been modified, meaning the new "answer_input_ids" differ
            # from the original ones. However, when the length of the dataset modulo batch_size equals 1,
            # the last batch remains unaltered. This is a rare scenario that does not impact the training
            # process, so we exclude it from testing by iterating only up to len - 1.
            for i in range(len(tokenized_kl_dataset["answer_input_ids"]) - 1):
                self.assertListEqual(
                    tokenized_dataset["prompt_input_ids"][i],
                    tokenized_kl_dataset["prompt_input_ids"][i],
                )
                self.assertListEqual(
                    tokenized_dataset["prompt_attention_mask"][i],
                    tokenized_kl_dataset["prompt_attention_mask"][i],
                )
                self.assertNotEqual(
                    tokenized_dataset["answer_input_ids"][i],
                    tokenized_kl_dataset["answer_input_ids"][i],
                )
        # Processing concatenates prompt and answer and masks prompt tokens in the labels.
        fn_kwargs = {
            "prefix": "",
            "is_encoder_decoder": trainer.is_encoder_decoder,
            "tokenizer": trainer.tokenizer,
            "max_length": trainer.max_length,
            "truncation_mode": trainer.truncation_mode,
            "label_pad_token_id": trainer.label_pad_token_id,
            "max_prompt_length": trainer.max_prompt_length,
        }
        processed_dataset = tokenized_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=2)
        self.assertListEqual(processed_dataset["prompt"][:], train_dataset["prompt"][:])
        self.assertListEqual(processed_dataset["completion"][:], train_dataset["completion"][:])
        self.assertListEqual(processed_dataset["label"][:], train_dataset["label"][:])
        self.assertListEqual(processed_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
        self.assertListEqual(processed_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
        self.assertListEqual(processed_dataset["completion_input_ids"][0], [46518, 374, 2664, 1091, 27261, 13, 151645])
        self.assertListEqual(processed_dataset["completion_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1])
        self.assertListEqual(processed_dataset["completion_labels"][0], [-100, -100, -100, -100, 27261, 13, 151645])

    def test_kto_trainer_without_providing_ref_model(self):
        """Training works with an implicit reference model (`ref_model=None`)."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=4,
            learning_rate=9e-1,
            eval_strategy="steps",
            beta=0.1,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        trainer = KTOTrainer(
            model=self.model,
            ref_model=None,
            args=training_args,
            processing_class=self.tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"],
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            if param.sum() != 0:  # ignore 0 biases
                self.assertFalse(torch.equal(param, new_param))

    @require_peft
    def test_kto_trainer_without_providing_ref_model_with_lora(self):
        """Training with a LoRA adapter (no ref model) updates the LoRA weights."""
        from peft import LoraConfig

        lora_config = LoraConfig(
            r=16,
            lora_alpha=32,
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=4,
            learning_rate=9e-1,
            eval_strategy="steps",
            beta=0.1,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        trainer = KTOTrainer(
            model=self.model,
            ref_model=None,
            args=training_args,
            processing_class=self.tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"],
            peft_config=lora_config,
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # Check that the parameters have changed
        for n, param in previous_trainable_params.items():
            # Only the LoRA adapter weights are trainable; base weights stay frozen.
            if "lora" in n:
                new_param = trainer.model.get_parameter(n)
                if param.sum() != 0:  # ignore 0 biases
                    self.assertFalse(torch.equal(param, new_param))

    @require_no_wandb
    def test_kto_trainer_generate_during_eval_no_wandb(self):
        """`generate_during_eval=True` without wandb/comet installed must raise a ValueError."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=1,
            learning_rate=9e-1,
            eval_strategy="steps",
            beta=0.1,
            generate_during_eval=True,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        with self.assertRaisesRegex(
            ValueError,
            expected_regex="`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
            " Please install `wandb` or `comet-ml` to resolve.",
        ):
            KTOTrainer(
                model=self.model,
                ref_model=None,
                args=training_args,
                processing_class=self.tokenizer,
                train_dataset=dummy_dataset["train"],
                eval_dataset=dummy_dataset["test"],
            )

    @require_peft
    def test_kto_lora_save(self):
        """A LoRA-trained model saves an adapter that can be reloaded from disk."""
        from peft import LoraConfig, get_peft_model

        lora_config = LoraConfig(
            r=16,
            lora_alpha=32,
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )
        # lora model
        model = AutoModelForCausalLM.from_pretrained(self.model_id)
        model_peft = get_peft_model(model, lora_config)
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            per_device_train_batch_size=2,
            max_steps=3,
            remove_unused_columns=False,
            gradient_accumulation_steps=4,
            learning_rate=9e-1,
            eval_strategy="steps",
            beta=0.1,
            report_to="none",
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        # kto train lora model with a lora config
        trainer = KTOTrainer(
            model=model_peft,
            ref_model=None,
            args=training_args,
            processing_class=self.tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"],
            peft_config=lora_config,
        )
        # train the model
        trainer.train()
        # save peft adapter
        trainer.save_model()
        # assert that the model is loaded without giving OSError
        try:
            AutoModelForCausalLM.from_pretrained(self.tmp_dir)
        except OSError:
            self.fail("Loading the saved peft adapter failed")

    @require_liger_kernel
    def test_kto_trainer_with_liger(self):
        """Test KTO trainer with Liger loss enabled."""
        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            report_to="none",
            use_liger_loss=True,  # Enable Liger loss
        )
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
        trainer = KTOTrainer(
            model=self.model,
            args=training_args,
            processing_class=self.tokenizer,
            train_dataset=dummy_dataset["train"],
        )
        previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
        trainer.train()
        self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
        # check the params have changed
        for n, param in previous_trainable_params.items():
            new_param = trainer.model.get_parameter(n)
            # check the params have changed - ignore 0 biases
            if param.sum() != 0:
                self.assertFalse(torch.equal(param, new_param))

    def test_compute_metrics(self):
        """A custom `compute_metrics` callable is invoked and its values appear in the eval logs."""
        model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
        ref_model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
        tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
        tokenizer.pad_token = tokenizer.eos_token
        dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")

        def dummy_compute_metrics(*args, **kwargs):
            # Constant metric: we only check it is routed into the logs.
            return {"test": 0.0}

        training_args = KTOConfig(
            output_dir=self.tmp_dir,
            remove_unused_columns=False,
            per_device_train_batch_size=2,
            do_eval=True,
            eval_strategy="steps",
            eval_steps=1,
            per_device_eval_batch_size=2,
            report_to="none",
        )
        trainer = KTOTrainer(
            model=model,
            ref_model=ref_model,
            args=training_args,
            processing_class=tokenizer,
            train_dataset=dummy_dataset["train"],
            eval_dataset=dummy_dataset["test"],
            compute_metrics=dummy_compute_metrics,
        )
        trainer.train()
        self.assertEqual(trainer.state.log_history[-2]["eval_test"], 0.0)
| trl/tests/test_kto_trainer.py/0 | {
"file_path": "trl/tests/test_kto_trainer.py",
"repo_id": "trl",
"token_count": 8277
} | 599 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
import psutil
import pytest
from transformers import AutoModelForCausalLM
from transformers.testing_utils import require_torch_multi_accelerator, torch_device
from trl.extras.vllm_client import VLLMClient
from trl.scripts.vllm_serve import chunk_list
from .testing_utils import TrlTestCase, require_3_accelerators
class TestChunkList(TrlTestCase):
    """Unit tests for `chunk_list`, which partitions a list into `n` roughly equal chunks."""

    def test_even_split(self):
        # Six items into two chunks -> two equal halves.
        result = chunk_list([1, 2, 3, 4, 5, 6], 2)
        self.assertEqual(result, [[1, 2, 3], [4, 5, 6]])

    def test_uneven_split(self):
        # The leading chunks absorb the remainder.
        result = chunk_list([1, 2, 3, 4, 5, 6], 4)
        self.assertEqual(result, [[1, 2], [3, 4], [5], [6]])

    def test_more_chunks_than_elements(self):
        # Surplus chunks come back empty rather than being dropped.
        result = chunk_list([1, 2, 3, 4, 5, 6], 8)
        self.assertEqual(result, [[1], [2], [3], [4], [5], [6], [], []])

    def test_n_equals_len(self):
        result = chunk_list([1, 2, 3], 3)
        self.assertEqual(result, [[1], [2], [3]])

    def test_n_is_1(self):
        result = chunk_list([1, 2, 3], 1)
        self.assertEqual(result, [[1, 2, 3]])

    def test_single_element_list(self):
        result = chunk_list([42], 2)
        self.assertEqual(result, [[42], []])

    def test_any_dtype(self):
        # Heterogeneous element types are passed through untouched.
        result = chunk_list([1, "two", 3.0, {"four": 4}, ["f", "i", "v", "e"]], 2)
        self.assertEqual(
            result,
            [[1, "two", 3.0], [{"four": 4}, ["f", "i", "v", "e"]]],
        )
@pytest.mark.slow
@require_torch_multi_accelerator
class TestVLLMClientServer(TrlTestCase):
    """End-to-end tests for `VLLMClient` talking to a live `trl vllm-serve` subprocess."""

    model_id = "Qwen/Qwen2.5-1.5B"

    @classmethod
    def setUpClass(cls):
        # Pin the server to accelerator 1 so accelerator 0 stays free for the client side.
        env = os.environ.copy()
        device_env_var = "ZE_AFFINITY_MASK" if torch_device == "xpu" else "CUDA_VISIBLE_DEVICES"
        env[device_env_var] = "1"
        # Launch the vLLM server as a subprocess.
        cls.server_process = subprocess.Popen(
            ["trl", "vllm-serve", "--model", cls.model_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        )
        # Connect the client (waits up to 240 s for the server to come up).
        cls.client = VLLMClient(connection_timeout=240)
        cls.client.init_communicator()

    def test_generate(self):
        """Generation returns one token-id sequence per prompt."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        completions = self.client.generate(prompts)
        self.assertIsInstance(completions, list)
        self.assertEqual(len(completions), len(prompts))
        for completion in completions:
            # Each completion is a sequence of token ids.
            self.assertTrue(all(isinstance(token, int) for token in completion))

    def test_generate_with_params(self):
        """Sampling parameters (n, repetition_penalty, temperature, max_tokens) are honored."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        completions = self.client.generate(prompts, n=2, repetition_penalty=0.9, temperature=0.8, max_tokens=32)
        self.assertIsInstance(completions, list)
        # With n=2, the server returns two completions per prompt.
        self.assertEqual(len(completions), 2 * len(prompts))
        for completion in completions:
            self.assertTrue(all(isinstance(token, int) for token in completion))
            # max_tokens bounds the completion length.
            self.assertLessEqual(len(completion), 32)

    def test_update_model_params(self):
        """Pushing fresh weights to the server should succeed."""
        model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map=torch_device)
        self.client.update_model_params(model)

    def test_reset_prefix_cache(self):
        """Resetting the server-side prefix cache should succeed."""
        self.client.reset_prefix_cache()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        cls.client.close_communicator()
        # vLLM under Popen/pytest does not reliably clean up; terminate the server's children
        # explicitly to avoid zombie processes.
        server = psutil.Process(cls.server_process.pid)
        for child in server.children(recursive=True):
            child.send_signal(signal.SIGTERM)
        cls.server_process.terminate()
        cls.server_process.wait()
# Same as above but using base_url to instantiate the client.
@pytest.mark.slow
@require_torch_multi_accelerator
class TestVLLMClientServerBaseURL(TrlTestCase):
    """Mirror of TestVLLMClientServer, but the client is built from an explicit
    ``base_url`` rather than the default host/port arguments."""

    model_id = "Qwen/Qwen2.5-1.5B"

    @classmethod
    def setUpClass(cls):
        # We want the server to run on accelerator 1, so we set VISIBLE_DEVICES to "1"
        env = os.environ.copy()
        VISIBLE_DEVICES = "ZE_AFFINITY_MASK" if torch_device == "xpu" else "CUDA_VISIBLE_DEVICES"
        env[VISIBLE_DEVICES] = "1"  # Restrict to accelerator 1
        # Start the server process
        cls.server_process = subprocess.Popen(
            ["trl", "vllm-serve", "--model", cls.model_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        )
        # Initialize the client via base_url (the server's default bind address/port).
        cls.client = VLLMClient(base_url="http://localhost:8000", connection_timeout=240)
        cls.client.init_communicator()

    def test_generate(self):
        """Generation returns one token-id sequence per prompt."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        outputs = self.client.generate(prompts)
        # Check that the output is a list
        self.assertIsInstance(outputs, list)
        # Check that the number of generated sequences is equal to the number of prompts
        self.assertEqual(len(outputs), len(prompts))
        # Check that the generated sequences are lists of integers
        for seq in outputs:
            self.assertTrue(all(isinstance(tok, int) for tok in seq))

    def test_generate_with_params(self):
        """Sampling parameters (n, repetition_penalty, temperature, max_tokens) are honored."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        outputs = self.client.generate(prompts, n=2, repetition_penalty=0.9, temperature=0.8, max_tokens=32)
        # Check that the output is a list
        self.assertIsInstance(outputs, list)
        # Check that the number of generated sequences is 2 times the number of prompts
        self.assertEqual(len(outputs), 2 * len(prompts))
        # Check that the generated sequences are lists of integers
        for seq in outputs:
            self.assertTrue(all(isinstance(tok, int) for tok in seq))
        # Check that the length of the generated sequences is less than or equal to 32
        for seq in outputs:
            self.assertLessEqual(len(seq), 32)

    def test_update_model_params(self):
        """Pushing fresh weights to the server should succeed."""
        model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map=torch_device)
        self.client.update_model_params(model)

    def test_reset_prefix_cache(self):
        # Test resetting the prefix cache
        self.client.reset_prefix_cache()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Close the client
        cls.client.close_communicator()
        # vLLM x pytest (or Popen) seems not to handle process termination well. To avoid zombie processes, we need to
        # kill the server process and its children explicitly.
        parent = psutil.Process(cls.server_process.pid)
        children = parent.children(recursive=True)
        for child in children:
            child.send_signal(signal.SIGTERM)
        cls.server_process.terminate()
        cls.server_process.wait()
@pytest.mark.slow
@require_3_accelerators
class TestVLLMClientServerTP(TrlTestCase):
    """Client/server tests with tensor parallelism (TP=2) spread over two accelerators."""

    model_id = "Qwen/Qwen2.5-1.5B"

    @classmethod
    def setUpClass(cls):
        # We want the server to run on accelerator 1 and 2, so we set VISIBLE_DEVICES to "1,2"
        env = os.environ.copy()
        VISIBLE_DEVICES = "ZE_AFFINITY_MASK" if torch_device == "xpu" else "CUDA_VISIBLE_DEVICES"
        env[VISIBLE_DEVICES] = "1,2"  # Restrict to accelerator 1 and 2
        # Start the server process with tensor parallelism across the two visible accelerators.
        cls.server_process = subprocess.Popen(
            ["trl", "vllm-serve", "--model", cls.model_id, "--tensor_parallel_size", "2"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
        )
        # Initialize the client
        cls.client = VLLMClient(connection_timeout=240)
        cls.client.init_communicator()

    def test_generate(self):
        """Generation through the TP server returns one token-id sequence per prompt."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        outputs = self.client.generate(prompts)
        # Check that the output is a list
        self.assertIsInstance(outputs, list)
        # Check that the number of generated sequences is equal to the number of prompts
        self.assertEqual(len(outputs), len(prompts))
        # Check that the generated sequences are lists of integers
        for seq in outputs:
            self.assertTrue(all(isinstance(tok, int) for tok in seq))

    def test_update_model_params(self):
        """Weight synchronization into the sharded (TP) server should succeed."""
        model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map=torch_device)
        self.client.update_model_params(model)

    def test_reset_prefix_cache(self):
        # Test resetting the prefix cache
        self.client.reset_prefix_cache()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Close the client
        cls.client.close_communicator()
        # vLLM x pytest (or Popen) seems not to handle process termination well. To avoid zombie processes, we need to
        # kill the server process and its children explicitly.
        parent = psutil.Process(cls.server_process.pid)
        children = parent.children(recursive=True)
        for child in children:
            child.send_signal(signal.SIGTERM)
        cls.server_process.terminate()
        cls.server_process.wait()
@pytest.mark.slow
@require_3_accelerators
class TestVLLMClientServerDP(TrlTestCase):
    """Client/server tests with data parallelism (DP=2) spread over two accelerators."""

    model_id = "Qwen/Qwen2.5-1.5B"

    @classmethod
    def setUpClass(cls):
        # We want the server to run on accelerator 1 and 2, so we set VISIBLE_DEVICES to "1,2"
        env = os.environ.copy()
        VISIBLE_DEVICES = "ZE_AFFINITY_MASK" if torch_device == "xpu" else "CUDA_VISIBLE_DEVICES"
        env[VISIBLE_DEVICES] = "1,2"  # Restrict to accelerator 1 and 2
        # Start the server process with data parallelism across the two visible accelerators.
        cls.server_process = subprocess.Popen(
            ["trl", "vllm-serve", "--model", cls.model_id, "--data_parallel_size", "2"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
        )
        # Initialize the client
        cls.client = VLLMClient(connection_timeout=240)
        cls.client.init_communicator()

    def test_generate(self):
        """Generation through the DP server returns one token-id sequence per prompt."""
        prompts = ["Hello, AI!", "Tell me a joke"]
        outputs = self.client.generate(prompts)
        # Check that the output is a list
        self.assertIsInstance(outputs, list)
        # Check that the number of generated sequences is equal to the number of prompts
        self.assertEqual(len(outputs), len(prompts))
        # Check that the generated sequences are lists of integers
        for seq in outputs:
            self.assertTrue(all(isinstance(tok, int) for tok in seq))

    def test_update_model_params(self):
        """Weight synchronization into the replicated (DP) server should succeed."""
        model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map=torch_device)
        self.client.update_model_params(model)

    def test_reset_prefix_cache(self):
        # Test resetting the prefix cache
        self.client.reset_prefix_cache()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Close the client
        cls.client.close_communicator()
        # vLLM x pytest (or Popen) seems not to handle process termination well. To avoid zombie processes, we need to
        # kill the server process and its children explicitly.
        parent = psutil.Process(cls.server_process.pid)
        children = parent.children(recursive=True)
        for child in children:
            child.send_signal(signal.SIGTERM)
        cls.server_process.terminate()
        cls.server_process.wait()
@pytest.mark.slow
@require_torch_multi_accelerator
class TestVLLMClientServerDeviceParameter(TrlTestCase):
    """Test the device parameter functionality in init_communicator."""

    model_id = "Qwen/Qwen2.5-1.5B"

    @classmethod
    def setUpClass(cls):
        # We want the server to run on accelerator 1, so we set VISIBLE_DEVICES to "1"
        env = os.environ.copy()
        VISIBLE_DEVICES = "ZE_AFFINITY_MASK" if torch_device == "xpu" else "CUDA_VISIBLE_DEVICES"
        env[VISIBLE_DEVICES] = "1"  # Restrict to accelerator 1
        # Start the server process. Unlike the other server test classes, no shared client is
        # created here: each test constructs its own client to exercise init_communicator(device=...).
        cls.server_process = subprocess.Popen(
            ["trl", "vllm-serve", "--model", cls.model_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        )

    def test_init_communicator_with_device_int(self):
        """Test init_communicator with integer device parameter."""
        client = VLLMClient(connection_timeout=240)
        client.init_communicator(device=0)  # Explicitly specify device 0
        # Test basic functionality
        prompts = ["Hello, AI!"]
        outputs = client.generate(prompts)
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), len(prompts))
        client.close_communicator()

    def test_init_communicator_with_device_string(self):
        """Test init_communicator with string device parameter."""
        client = VLLMClient(connection_timeout=240)
        client.init_communicator(device="cuda:0")  # Explicitly specify device as string
        # Test basic functionality
        prompts = ["Hello, AI!"]
        outputs = client.generate(prompts)
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), len(prompts))
        client.close_communicator()

    def test_init_communicator_with_torch_device(self):
        """Test init_communicator with torch.device object."""
        import torch

        client = VLLMClient(connection_timeout=240)
        device = torch.device("cuda:0")
        client.init_communicator(device=device)  # Explicitly specify torch.device object
        # Test basic functionality
        prompts = ["Hello, AI!"]
        outputs = client.generate(prompts)
        self.assertIsInstance(outputs, list)
        self.assertEqual(len(outputs), len(prompts))
        client.close_communicator()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # vLLM x pytest (or Popen) seems not to handle process termination well. To avoid zombie processes, we need to
        # kill the server process and its children explicitly.
        parent = psutil.Process(cls.server_process.pid)
        children = parent.children(recursive=True)
        for child in children:
            child.send_signal(signal.SIGTERM)
        cls.server_process.terminate()
        cls.server_process.wait()
| trl/tests/test_vllm_client_server.py/0 | {
"file_path": "trl/tests/test_vllm_client_server.py",
"repo_id": "trl",
"token_count": 6258
} | 600 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Union
import torch
from transformers import GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast, set_seed
from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper
class BestOfNSampler:
    def __init__(
        self,
        model: PreTrainedModelWrapper,
        tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
        queries_to_scores: Callable[[list[str]], list[float]],
        length_sampler: Any,
        sample_size: int = 4,
        seed: Optional[int] = None,
        n_candidates: int = 1,
        generation_config: Optional[GenerationConfig] = None,
    ) -> None:
        r"""
        Initialize the sampler for best-of-n generation

        Args:
            model (`PreTrainedModelWrapper`):
                The pretrained model to use for generation
            tokenizer (`PreTrainedTokenizer` or `PreTrainedTokenizerFast`):
                Tokenizer associated with the pretrained model
            queries_to_scores (`Callable[[list[str]], list[float]]`):
                Callable that takes a list of generated texts and returns the associated reward scores
            length_sampler (`Any`):
                Sampler used to sample the length of the generated text
            sample_size (`int`):
                Number of samples to generate for each query
            seed (`int`, *optional*):
                Random seed used to control generation
            n_candidates (`int`):
                Number of candidates to return for each query
            generation_config (`GenerationConfig`, *optional*):
                Generation config passed to the underlying model's `generate` method. See `GenerationConfig`
                (https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/text_generation#transformers.GenerationConfig)
                for more details

        Raises:
            ValueError: If `tokenizer` is not a tokenizer instance or `model` is not a supported wrapper.
        """
        if seed is not None:
            set_seed(seed)
        if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)):
            raise ValueError(
                f"tokenizer must be a PreTrainedTokenizer or PreTrainedTokenizerFast, got {type(tokenizer)}"
            )
        if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
            raise ValueError(
                f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
            )
        self.model = model
        self.tokenizer = tokenizer
        self.queries_to_scores = queries_to_scores
        self.length_sampler = length_sampler
        self.gen_config = generation_config
        self.sample_size = sample_size
        self.n_candidates = n_candidates

    def generate(
        self,
        tokenized_query: Union[list[int], torch.Tensor, list[torch.Tensor], list[list[int]]],
        skip_special_tokens: bool = True,
        device: Optional[Union[str, torch.device]] = None,
        **generation_kwargs,
    ) -> list[list[str]]:
        r"""
        Generate the best of n samples for input queries

        Args:
            tokenized_query (`list[int]` or `torch.Tensor` or `list[torch.Tensor]` or `list[list[int]]`):
                represents either a single tokenized query (a single tensor or a list of integers), a batch of
                tokenized queries (a list of tensors or a list of lists of integers), or an already-batched 2-D
                tensor of queries
            skip_special_tokens (`bool`):
                Whether to remove the special tokens from the output
            device (`str` or `torch.device`, *optional*):
                The device on which the model will be loaded
            **generation_kwargs (`dict`, *optional*):
                Additional keyword arguments passed along to the underlying model's `generate` method. This is used to
                override generation config

        Returns:
            list[list[str]]: A list of lists of generated texts (the `n_candidates` best per query)
        """
        # Normalize the input into something iterable as per-query token tensors.
        if isinstance(tokenized_query, torch.Tensor):
            # A 1-D tensor is a single query; a 2-D tensor is treated as an already-batched input.
            # (Previously a 2-D tensor silently fell through and crashed with an opaque TypeError.)
            queries = tokenized_query.unsqueeze(0) if tokenized_query.ndim == 1 else tokenized_query
        elif isinstance(tokenized_query, list):
            element_type = type(tokenized_query[0])
            if element_type is int:
                # Single query given as a flat list of token ids.
                queries = torch.tensor(tokenized_query).unsqueeze(0)
            elif element_type is torch.Tensor:
                queries = [tensor.reshape((1, -1)) for tensor in tokenized_query]
            else:
                queries = [torch.tensor(query).reshape((1, -1)) for query in tokenized_query]
        else:
            raise ValueError(f"tokenized_query must be a tensor or a list, got {type(tokenized_query)}")

        result = []
        for query in queries:
            # One forward pass generates `sample_size` candidate continuations for this query.
            candidate_inputs = query.repeat((self.sample_size, 1))
            output = self.model.generate(
                candidate_inputs.to(device),
                max_new_tokens=self.length_sampler(),
                generation_config=self.gen_config,
                **generation_kwargs,
            )
            # NOTE: the previous implementation called `.squeeze()` here, which collapsed the batch
            # dimension when `sample_size == 1` and made `batch_decode` decode token-by-token.
            # Keeping the output 2-D is a no-op for sample_size > 1 and fixes sample_size == 1.
            decoded = self.tokenizer.batch_decode(output, skip_special_tokens=skip_special_tokens)
            scores = torch.tensor(self.queries_to_scores(decoded))
            # Keep the n_candidates highest-scoring candidates, best first.
            best = [decoded[i] for i in scores.topk(self.n_candidates).indices]
            result.append(best)
        return result
| trl/trl/extras/best_of_n_sampler.py/0 | {
"file_path": "trl/trl/extras/best_of_n_sampler.py",
"repo_id": "trl",
"token_count": 2439
} | 601 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def think_format_reward(completions: list[list[dict[str, str]]], **kwargs) -> list[float]:
    r"""
    Binary format reward for reasoning traces wrapped in `"<think>"`/`"</think>"` tags.

    A completion scores 1.0 when it starts with a single `<think>` section (no nested `<think>`
    tag) that is properly closed by `</think>`, and 0.0 otherwise.

    Args:
        completions (`list[list[dict[str, str]]]`):
            Completions to score. Each completion is a single-message list whose message dict holds
            the completion text under the `"content"` key.
        **kwargs:
            Ignored; accepted only so the signature is compatible with trainers such as
            [`GRPOTrainer`].

    Returns:
        `list[float]`:
            One reward per completion: 1.0 for a well-formed think block, 0.0 otherwise.

    Example:
    ```python
    >>> from trl.rewards import think_format_reward
    >>> completions = [
    ...     [{"content": "<think>\nThis is my reasoning.\n</think>\nThis is my answer."}],
    ...     [{"content": "<think>\nThis is my reasoning.\nThis is my answer."}],
    ... ]
    >>> think_format_reward(completions)
    [1.0, 0.0]
    ```
    """
    # Anchored at the start; the negative lookahead rejects a second, nested "<think>" tag.
    pattern = r"^<think>(?!.*<think>)(.*?)</think>.*$"
    rewards = []
    for completion in completions:
        text = completion[0]["content"]
        matched = re.match(pattern, text, re.DOTALL | re.MULTILINE)
        rewards.append(1.0 if matched else 0.0)
    return rewards
| trl/trl/rewards/format_rewards.py/0 | {
"file_path": "trl/trl/rewards/format_rewards.py",
"repo_id": "trl",
"token_count": 744
} | 602 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Optional, Union
import pandas as pd
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.utils import gather_object, is_wandb_available
from transformers import (
GenerationConfig,
PreTrainedModel,
PreTrainedTokenizerBase,
Trainer,
TrainerCallback,
TrainerControl,
TrainerState,
TrainingArguments,
)
from transformers.trainer_utils import has_length
from transformers.utils import is_rich_available
from ..data_utils import maybe_apply_chat_template
from ..import_utils import is_mergekit_available
from ..mergekit_utils import MergeConfig, merge_models, upload_model_to_hf
from ..models.utils import unwrap_model_for_generation
from .judges import BasePairwiseJudge
from .utils import log_table_to_comet_experiment
if is_rich_available():
from rich.console import Console, Group
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress
if is_wandb_available():
import wandb
# Logger for module-level logging
logger = logging.getLogger(__name__)
def _generate_completions(
    prompts: list[str],
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizerBase,
    accelerator: Accelerator,
    generation_config: Optional[GenerationConfig],
    batch_size: int = 1,
) -> list[str]:
    """
    Generate one completion per prompt with the given model, processing prompts in batches.

    Args:
        prompts (list[str]): Pre-formatted prompts to complete.
        model (PreTrainedModel): Model used for generation.
        tokenizer (PreTrainedTokenizerBase): Tokenizer for encoding prompts and decoding outputs.
        accelerator (Accelerator): Accelerator wrapping the model for execution.
        generation_config (GenerationConfig): Settings forwarded to the model's `generate` method.
        batch_size (int, optional): Number of prompts per forward pass. Default is 1.

    Returns:
        list[str]: Decoded completions, aligned with the order of `prompts`.
    """
    completions = []
    with unwrap_model_for_generation(model, accelerator) as unwrapped_model:
        for start in range(0, len(prompts), batch_size):
            prompt_batch = prompts[start : start + batch_size]
            inputs = tokenizer(prompt_batch, return_tensors="pt", padding=True, truncation=True).to(model.device)
            generations = unwrapped_model.generate(
                **inputs,
                generation_config=generation_config,
            )
            for input_ids, generation in zip(inputs.input_ids, generations):
                # Strip the (padded) prompt tokens so only the newly generated text is decoded.
                completion_ids = generation[len(input_ids) :]
                completions.append(tokenizer.decode(completion_ids, skip_special_tokens=True))
    return completions
class SyncRefModelCallback(TrainerCallback):
    """
    Callback that periodically blends the reference model towards the trained model
    (an EMA-style parameter sync performed every `ref_model_sync_steps` steps).
    """

    def __init__(
        self,
        ref_model: Union[PreTrainedModel, torch.nn.Module],
        accelerator: Optional[Accelerator],
    ):
        self.accelerator = accelerator
        self.ref_model = ref_model

    @staticmethod
    def _sync_target_model(model, target_model, alpha):
        # In-place update: target <- (1 - alpha) * target + alpha * source, parameter by parameter.
        for ref_param, model_param in zip(target_model.parameters(), model.parameters()):
            ref_param.data.mul_(1.0 - alpha).add_(model_param.data, alpha=alpha)

    @staticmethod
    def sync_target_model(model, target_model, alpha):
        deepspeed_plugin = AcceleratorState().deepspeed_plugin
        if deepspeed_plugin is None or deepspeed_plugin.zero_stage != 3:
            SyncRefModelCallback._sync_target_model(model, target_model, alpha)
            return
        # Under ZeRO-3 the parameters are sharded across ranks: gather them first and let
        # rank 0 perform the in-place update.
        import deepspeed

        gathered = list(model.parameters()) + list(target_model.parameters())
        with deepspeed.zero.GatheredParameters(gathered, modifier_rank=0):
            if deepspeed.comm.get_rank() == 0:
                SyncRefModelCallback._sync_target_model(model, target_model, alpha)

    def on_step_end(self, args, state, control, **kwargs):
        model: PreTrainedModel = kwargs["model"]
        # Only sync when a reference model exists and we hit a sync boundary.
        if self.ref_model is None or state.global_step % args.ref_model_sync_steps != 0:
            return
        if self.accelerator:
            model = self.accelerator.unwrap_model(model)
        self.sync_target_model(model, self.ref_model, args.ref_model_mixup_alpha)
class RichProgressCallback(TrainerCallback):
    """
    A [`TrainerCallback`] that displays the progress of training or evaluation using Rich.

    All rendering happens on the world-zero process only; every hook is a no-op elsewhere.
    """

    def __init__(self):
        if not is_rich_available():
            raise ImportError("RichProgressCallback requires the `rich` extra. To install, run `pip install rich`.")
        self._reset_state()

    def _reset_state(self):
        # Drop every Rich handle so the callback can be (re)used for a fresh run.
        self.training_bar = None
        self.prediction_bar = None
        self.training_task_id = None
        self.prediction_task_id = None
        self.rich_group = None
        self.rich_console = None
        self.training_status = None
        self.current_step = None

    def _remove_prediction_task(self):
        # Tear down the eval progress bar so the next evaluation starts from scratch.
        if self.prediction_task_id is not None:
            self.prediction_bar.remove_task(self.prediction_task_id)
            self.prediction_task_id = None

    def on_train_begin(self, args, state, control, **kwargs):
        if not state.is_world_process_zero:
            return
        self.training_bar = Progress()
        self.prediction_bar = Progress()
        self.rich_console = Console()
        self.training_status = self.rich_console.status("Nothing to log yet ...")
        # Stack both bars and the status line into a single live panel.
        self.rich_group = Live(Panel(Group(self.training_bar, self.prediction_bar, self.training_status)))
        self.rich_group.start()
        self.training_task_id = self.training_bar.add_task("[blue]Training the model", total=state.max_steps)
        self.current_step = 0

    def on_step_end(self, args, state, control, **kwargs):
        if not state.is_world_process_zero:
            return
        # Advance by the delta since the last callback (handles gradient accumulation).
        advance = state.global_step - self.current_step
        self.training_bar.update(self.training_task_id, advance=advance, update=True)
        self.current_step = state.global_step

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not (state.is_world_process_zero and has_length(eval_dataloader)):
            return
        if self.prediction_task_id is None:
            # Lazily create the eval bar on the first prediction step.
            self.prediction_task_id = self.prediction_bar.add_task(
                "[blue]Predicting on the evaluation dataset", total=len(eval_dataloader)
            )
        self.prediction_bar.update(self.prediction_task_id, advance=1, update=True)

    def on_evaluate(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            self._remove_prediction_task()

    def on_predict(self, args, state, control, **kwargs):
        if state.is_world_process_zero:
            self._remove_prediction_task()

    def on_log(self, args, state, control, logs=None, **kwargs):
        if state.is_world_process_zero and self.training_bar is not None:
            logs.pop("total_flos", None)  # too noisy to display
            self.training_status.update(f"[bold green]Status = {str(logs)}")

    def on_train_end(self, args, state, control, **kwargs):
        if not state.is_world_process_zero:
            return
        self.rich_group.stop()
        self._reset_state()
def _win_rate_completions_df(
    state: TrainerState, prompts: list[str], completions: list[str], winner_indices: list[str]
) -> pd.DataFrame:
    """Build a DataFrame with one row per prompt: (step, prompt, reference completion, policy completion, winner)."""
    step = str(state.global_step)
    rows = [
        # Each completion entry is a (reference_model, policy) pair; unpack it into two columns.
        (step, prompt, completion_pair[0], completion_pair[1], winner)
        for prompt, completion_pair, winner in zip(prompts, completions, winner_indices)
    ]
    return pd.DataFrame(rows, columns=["step", "prompt", "reference_model", "policy", "winner_index"])
class WinRateCallback(TrainerCallback):
"""
A [`~transformers.TrainerCallback`] that computes the win rate of a model based on a reference.
It generates completions using prompts from the evaluation dataset and compares the trained model's outputs against
a reference. The reference is either the initial version of the model (before training) or the reference model, if
available in the trainer. During each evaluation step, a judge determines how often the trained model's completions
win against the reference using a judge. The win rate is then logged in the trainer's logs under the key
`"eval_win_rate"`.
Usage:
```python
trainer = DPOTrainer(...)
judge = PairRMJudge()
win_rate_callback = WinRateCallback(judge=judge, trainer=trainer)
trainer.add_callback(win_rate_callback)
```
Args:
judge (`BasePairwiseJudge`):
The judge to use for comparing completions.
trainer (`Trainer`):
Trainer to which the callback will be attached. The trainer's evaluation dataset must include a `"prompt"`
column containing the prompts for generating completions. If the `Trainer` has a reference model (via the
`ref_model` attribute), it will use this reference model for generating the reference completions;
otherwise, it defaults to using the initial model.
generation_config (`GenerationConfig`, *optional*):
The generation config to use for generating completions.
num_prompts (`int` or `None`, *optional*, defaults to `None`):
The number of prompts to generate completions for. If not provided, defaults to the number of examples in
the evaluation dataset.
shuffle_order (`bool`, *optional*, defaults to `True`):
Whether to shuffle the order of the completions before judging.
use_soft_judge (`bool`, *optional*, defaults to `False`):
Whether to use a soft judge that returns a win probability between 0 and 1 for the first completion vs the
second.
"""
def __init__(
self,
judge: BasePairwiseJudge,
trainer: Trainer,
generation_config: Optional[GenerationConfig] = None,
num_prompts: Optional[int] = None,
shuffle_order: bool = True,
use_soft_judge: bool = False,
):
self.judge = judge
self.trainer = trainer
self.shuffle_order = shuffle_order
self.generation_config = generation_config
self.ref_completions = []
self.use_soft_judge = use_soft_judge
if self.trainer.eval_dataset is None:
raise ValueError("Trainer must have an evaluation dataset to use the WinRateCallback.")
else:
self.eval_dataset = self.trainer.eval_dataset
if num_prompts is not None:
self.eval_dataset = self.eval_dataset.select(range(num_prompts))
def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
    """Generate and cache reference completions at the start of training, then log a baseline win rate.

    NOTE(review): the body runs inside `accelerator.split_between_processes`, so every
    `gather_object` call below is a collective — all processes must reach them in the same order.
    """
    # When the trainer is initialized, we generate completions for the reference model.
    tokenizer = kwargs["processing_class"]
    tokenizer.padding_side = "left"  # generation expects left-padded batches
    accelerator = self.trainer.accelerator
    # Use the reference model if available, otherwise use the initial model
    model = getattr(self.trainer, "ref_model", None)
    # At this point, there are two cases where `ref_model` is None:
    # 1. The method doesn't require a reference model.
    # 2. The method uses a reference model, but `ref_model` is set to None.
    # This occurs when using PEFT, where the reference model can be obtained by simply disabling the model's adapter.
    # In theory, we should disable the adapter here, but since it's zero-initialized at the start of training,
    # the model behaves identically with or without the adapter.
    # Therefore, there's no need to explicitly disable it at this point.
    if model is None:
        model = self.trainer.model_wrapped
    with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts:
        # Cached for reuse by `on_evaluate` at every evaluation step.
        self.ref_completions = _generate_completions(
            prompts,
            model=model,
            tokenizer=tokenizer,
            accelerator=accelerator,
            generation_config=self.generation_config,
            batch_size=args.per_device_eval_batch_size,
        )
        # Compute initial win rate as a reference point: the reference completions are judged
        # against themselves, so this is a sanity baseline (presumably ~0.5 — confirm with the judge used).
        completions = list(zip(self.ref_completions, self.ref_completions))
        if self.use_soft_judge:
            # Soft judge returns P(first completion wins); threshold at 0.5 for a hard winner index.
            ref_win_probs = self.judge.judge(prompts, completions, self.shuffle_order, return_scores=True)
            winner_indices = [0 if score > 0.5 else 1 for score in ref_win_probs]
            ref_win_probs = gather_object(ref_win_probs)
        else:
            winner_indices = self.judge.judge(prompts, completions, self.shuffle_order)
        prompts = gather_object(prompts)
        completions = gather_object(completions)
        winner_indices = gather_object(winner_indices)

    # Logging (main process only; everything has been gathered above)
    if self.trainer.accelerator.is_main_process:
        # Fraction of pairs where the second element (index 1) won.
        win_rate = sum(winner_idx == 1 for winner_idx in winner_indices) / len(winner_indices)
        if self.use_soft_judge:
            # Soft probabilities are for the first (reference) element; 1 - mean gives the other side.
            avg_win_prob = 1.0 - sum(ref_win_probs) / len(ref_win_probs)
            self.trainer.log({"eval_avg_win_prob": avg_win_prob, "eval_win_rate": win_rate})
        else:
            self.trainer.log({"eval_win_rate": win_rate})

        if "wandb" in args.report_to:
            import wandb

            if wandb.run is not None:
                df = _win_rate_completions_df(
                    state=state,
                    prompts=prompts,
                    completions=completions,
                    winner_indices=winner_indices,
                )
                wandb.log({"win_rate_completions": wandb.Table(dataframe=df)})

        if "comet_ml" in args.report_to:
            df = _win_rate_completions_df(
                state=state,
                prompts=prompts,
                completions=completions,
                winner_indices=winner_indices,
            )
            log_table_to_comet_experiment(
                name="win_rate_completions.csv",
                table=df,
            )
def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
    """Generate completions with the current model, judge them against the cached
    reference completions, and log the resulting win rate.

    NOTE(review): `gather_object` calls are collectives — keep their order identical
    across processes.
    """
    # At every evaluation step, we generate completions for the model and compare them with the reference
    # completions that have been generated at the beginning of training. We then compute the win rate and log it to
    # the trainer.
    tokenizer = kwargs["processing_class"]
    tokenizer.padding_side = "left"  # generation expects left-padded batches
    accelerator = self.trainer.accelerator
    model = self.trainer.model_wrapped
    with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts:
        completions = _generate_completions(
            prompts,
            model=model,
            tokenizer=tokenizer,
            accelerator=accelerator,
            generation_config=self.generation_config,
            batch_size=args.per_device_eval_batch_size,
        )
        # Pair as (reference, current): winner index 1 means the current model won.
        completions = list(zip(self.ref_completions, completions))
        if self.use_soft_judge:
            # Soft judge returns P(first element wins), i.e. the reference; threshold at 0.5.
            ref_win_probs = self.judge.judge(prompts, completions, self.shuffle_order, return_scores=True)
            winner_indices = [0 if score > 0.5 else 1 for score in ref_win_probs]
            ref_win_probs = gather_object(ref_win_probs)
        else:
            winner_indices = self.judge.judge(prompts, completions, self.shuffle_order)
        prompts = gather_object(prompts)
        completions = gather_object(completions)
        winner_indices = gather_object(winner_indices)

    # Logging (main process only; everything has been gathered above)
    if self.trainer.accelerator.is_main_process:
        # Fraction of prompts on which the current model (index 1) beat the reference.
        win_rate = sum(winner_idx == 1 for winner_idx in winner_indices) / len(winner_indices)
        if self.use_soft_judge:
            # Convert "reference wins" probabilities into the model's average win probability.
            avg_win_prob = 1.0 - sum(ref_win_probs) / len(ref_win_probs)
            self.trainer.log({"eval_avg_win_prob": avg_win_prob, "eval_win_rate": win_rate})
        else:
            self.trainer.log({"eval_win_rate": win_rate})

        if "wandb" in args.report_to:
            import wandb

            if wandb.run is not None:
                df = _win_rate_completions_df(
                    state=state,
                    prompts=prompts,
                    completions=completions,
                    winner_indices=winner_indices,
                )
                wandb.log({"win_rate_completions": wandb.Table(dataframe=df)})

        if "comet_ml" in args.report_to:
            df = _win_rate_completions_df(
                state=state,
                prompts=prompts,
                completions=completions,
                winner_indices=winner_indices,
            )
            log_table_to_comet_experiment(
                name="win_rate_completions.csv",
                table=df,
            )
class LogCompletionsCallback(TrainerCallback):
    r"""
    A [`~transformers.TrainerCallback`] that logs completions to Weights & Biases and/or Comet.

    Usage:
    ```python
    trainer = DPOTrainer(...)
    completions_callback = LogCompletionsCallback(trainer=trainer)
    trainer.add_callback(completions_callback)
    ```

    Args:
        trainer (`Trainer`):
            Trainer to which the callback will be attached. The trainer's evaluation dataset must include a `"prompt"`
            column containing the prompts for generating completions.
        generation_config (`GenerationConfig`, *optional*):
            The generation config to use for generating completions.
        num_prompts (`int` or `None`, *optional*):
            The number of prompts to generate completions for. If not provided, defaults to the number of examples in
            the evaluation dataset.
        freq (`int` or `None`, *optional*):
            The frequency at which to log completions. If not provided, defaults to the trainer's `eval_steps`.
    """

    def __init__(
        self,
        trainer: Trainer,
        generation_config: Optional[GenerationConfig] = None,
        num_prompts: Optional[int] = None,
        freq: Optional[int] = None,
    ):
        # Validate first: without eval prompts there is nothing to log.
        if trainer.eval_dataset is None:
            raise ValueError("Trainer must have an evaluation dataset to use the LogCompletionsCallback.")

        self.trainer = trainer
        self.generation_config = generation_config
        self.freq = freq
        self.table = []  # accumulated (step, prompt, completion) rows across the whole run
        self._last_logged_step = -1

        dataset = trainer.eval_dataset
        if num_prompts is not None:
            dataset = dataset.select(range(num_prompts))
        self.eval_dataset = dataset

    def on_step_end(self, args, state, control, **kwargs):
        # Skip if this step was already handled (the hook may fire several times per step).
        if state.global_step == self._last_logged_step:
            return

        # Respect the logging cadence; fall back to the trainer's eval cadence when unset.
        log_every = self.freq or state.eval_steps
        if state.global_step % log_every != 0:
            return

        processing_class = kwargs["processing_class"]
        processing_class.padding_side = "left"
        accelerator = self.trainer.accelerator
        model = self.trainer.model_wrapped
        with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts:
            # Render conversational prompts through the chat template when applicable.
            prompts = [maybe_apply_chat_template({"prompt": p}, processing_class)["prompt"] for p in prompts]
            completions = _generate_completions(
                prompts,
                model=model,
                tokenizer=processing_class,
                accelerator=accelerator,
                generation_config=self.generation_config,
                batch_size=args.per_device_eval_batch_size,
            )
            completions = gather_object(completions)
            prompts = gather_object(prompts)

        # Build the data to log (main process only).
        if self.trainer.accelerator.is_main_process:
            steps = [str(state.global_step)] * len(prompts)
            self.table.extend(zip(steps, prompts, completions))
            table = pd.DataFrame(columns=["step", "prompt", "completion"], data=self.table)

            if "wandb" in args.report_to:
                wandb.log({"completions": table})

            if "comet_ml" in args.report_to:
                log_table_to_comet_experiment(
                    name="completions.csv",
                    table=table,
                )

        # Save the last logged step, so we don't log the same completions multiple times
        self._last_logged_step = state.global_step
class MergeModelCallback(TrainerCallback):
    r"""
    A [`~transformers.TrainerCallback`] that merges the policy model (the model being trained) with another model based
    on a merge configuration.

    Args:
        merge_config ([`MergeConfig`], *optional*, defaults to `None`):
            Configuration used for the merging process. If not provided, the default [`MergeConfig`] is used.
        merge_at_every_checkpoint (`bool`, *optional*, defaults to `False`):
            Whether to merge the model at every checkpoint.
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether to push the merged model to the Hub after merging.

    Example:
    ```python
    from trl.mergekit_utils import MergeConfig
    from trl import MergeModelCallback

    config = MergeConfig()
    merge_callback = MergeModelCallback(config)
    trainer = DPOTrainer(..., callbacks=[merge_callback])
    ```
    """

    def __init__(
        self,
        merge_config: Optional["MergeConfig"] = None,
        merge_at_every_checkpoint: bool = False,
        push_to_hub: bool = False,
    ):
        # mergekit is an optional dependency; fail fast with an actionable message.
        if not is_mergekit_available():
            raise ImportError(
                "MergeModelCallback requires the `mergekit` extra. To install, run `pip install mergekit`."
            )
        self.merge_config = merge_config if merge_config is not None else MergeConfig()
        self.merge_at_every_checkpoint = merge_at_every_checkpoint
        self.push_to_hub = push_to_hub

    def _merge_and_maybe_push(self, output_dir, global_step, model):
        """Merge the checkpoint at `global_step` per the merge config; optionally upload the result."""
        checkpoint_path = os.path.join(output_dir, f"checkpoint-{global_step}")
        self.merge_config.policy_model_path = checkpoint_path
        # Default the merge target to the base model the policy was initialized from.
        if self.merge_config.target_model_path is None:
            self.merge_config.target_model_path = model.config._name_or_path
        merge_path = os.path.join(checkpoint_path, "merged")
        merge_models(self.merge_config.create(), merge_path)
        if self.push_to_hub:
            repo_name = f"{output_dir}_checkpoint-{global_step}_merged"
            upload_model_to_hf(merge_path, repo_name)

    def on_save(self, args, state, control, model=None, **kwargs):
        # Per-checkpoint merging, if enabled.
        if self.merge_at_every_checkpoint:
            self._merge_and_maybe_push(args.output_dir, state.global_step, model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # One final merge at the end of training, unless already done at each checkpoint.
        if not self.merge_at_every_checkpoint:
            self._merge_and_maybe_push(args.output_dir, state.global_step, model)
class BEMACallback(TrainerCallback):
    # docstyle-ignore
    r"""
    A [`~transformers.TrainerCallback`] that implements [BEMA](https://huggingface.co/papers/2508.00180)
    (Bias-Corrected Exponential Moving Average) by [Adam Block](https://huggingface.co/abblock) and [Cyril
    Zhang](https://huggingface.co/cyrilzhang). Code from https://github.com/abblock/bema under MIT license.

    BEMA computes model weights that scale like:

    $$
    \theta_t' = \alpha_t \cdot (\theta_t - \theta_0) + \text{EMA}_t
    $$

    where \\( \theta_t \\) is the current model weights, \\( \theta_0 \\) is a snapshot of the model weights at the
    first `update_after` step, \\( \text{EMA}_t \\) is the exponential moving average of the model weights, and
    \\( \alpha_t \\) is a scaling factor that decays with the number of steps \\( t \\) as

    $$
    \alpha_t = (\rho + \gamma \cdot t)^{-\eta}.
    $$

    The EMA is computed as:

    $$
    \text{EMA}_t = (1 - \beta_t) \cdot \text{EMA}_{t-1} + \beta_t \cdot \theta_t
    $$

    where \\( \beta_t \\) is a decay factor that decays with the number of steps \\( t \\) as

    $$
    \beta_t = (\rho + \gamma \cdot t)^{-\kappa}.
    $$

    Args:
        update_freq (`int`, *optional*, defaults to `400`):
            Update the BEMA weights every X steps. Denoted this as \\( \phi \\) in the paper.
        ema_power (`float`, *optional*, defaults to `0.5`):
            Power for the EMA decay factor. Denoted \\( \kappa \\) in the paper. To disable EMA, set this to `0.0`.
        bias_power (`float`, *optional*, defaults to `0.2`):
            Power for the BEMA scaling factor. Denoted \\( \eta \\) in the paper. To disable BEMA, set this to `0.0`.
        lag (`int`, *optional*, defaults to `10`):
            Initial offset in the weight decay schedule that controls early-stage smoothness by acting as a virtual
            starting age for the updates. Denoted as \\( \rho \\) in the paper.
        update_after (`int`, *optional*, defaults to `0`):
            Burn-in time before starting to update the BEMA weights. Denoted \\( \tau \\) in the paper.
        multiplier (`float`, *optional*, defaults to `1.0`):
            Initial value for the EMA decay factor. Denoted as \\( \gamma \\) in the paper.
        min_ema_multiplier (`float`, *optional*, defaults to `0.0`):
            Minimum value for the EMA decay factor.
        device (`str`, *optional*, defaults to `"cpu"`):
            Device to use for the BEMA buffers, e.g. `"cpu"` or `"cuda"`. Note that in most cases, this device SHOULD
            BE DIFFERENT from the device used for training in order to avoid OOM.

    Example:
    ```python
    from trl import BEMACallback

    trainer = Trainer(..., callbacks=[BEMACallback()])
    ```
    """

    def __init__(
        self,
        update_freq: int = 400,
        ema_power: float = 0.5,
        bias_power: float = 0.2,
        lag: int = 10,
        update_after: int = 0,
        multiplier: float = 1.0,
        min_ema_multiplier: float = 0.0,
        device: str = "cpu",
    ):
        # User-provided hyperparams
        self.update_freq = update_freq
        self.ema_power = ema_power
        self.bias_power = bias_power
        self.lag = lag
        self.update_after = update_after
        self.multiplier = multiplier
        self.min_ema_multiplier = min_ema_multiplier
        self.device = device

        # Internal state. The four buffer lists below are index-aligned: entry i of
        # each list refers to the same (trainable) parameter.
        self.param_names = []  # names of the trainable params, fixing the pairing order
        self.thetat_params = []  # references to training model params (θₜ)
        self.theta0_params = []  # θ₀ buffers (on self.device)
        self.ema_params = []  # EMA buffers (on self.device)
        self.running_params = []  # running_model params aligned with param_names
        self.running_model = None  # a copy of the model to run BEMA on

    @staticmethod
    def _unwrap_model(model):
        """
        Helper function to unwrap model from various wrappers including DataParallel, DistributedDataParallel,
        DeepSpeed, and FSDP.
        """
        # Handle DeepSpeed
        if hasattr(model, "module") and hasattr(model, "engine"):
            # DeepSpeed engine
            return model.module
        # Handle FSDP
        if hasattr(model, "_fsdp_wrapped_module"):
            # FSDP wrapped model
            return model._fsdp_wrapped_module
        # Handle DataParallel/DistributedDataParallel
        if hasattr(model, "module"):
            return model.module
        return model

    @torch.no_grad()
    def on_train_begin(
        self, args: TrainingArguments, state: TrainerState, control: TrainerControl, model: PreTrainedModel, **kwargs
    ):
        """Snapshot the model into `running_model` and cache aligned parameter buffers."""
        model = self._unwrap_model(model)
        # Create a new instance and load state_dict
        self.running_model = type(model)(model.config).to(self.device)
        self.running_model.load_state_dict(model.state_dict())

        # Reset cached state so re-entering training with the same callback instance
        # does not append duplicate buffers.
        self.param_names, self.thetat_params, self.theta0_params, self.ema_params, self.running_params = (
            [],
            [],
            [],
            [],
            [],
        )

        # Cache trainable parameters once in a fixed order. Pair each training parameter
        # with the *same-named* parameter of `running_model`: zipping
        # `running_model.parameters()` directly would misalign the buffers as soon as the
        # model contains any frozen (non-trainable) parameter, since those are skipped
        # here but not by `parameters()`.
        running_named_params = dict(self.running_model.named_parameters())
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            self.param_names.append(name)
            self.thetat_params.append(param)
            self.running_params.append(running_named_params[name])
            # Clone θ₀ and the EMA buffer onto self.device (kept off the training device to save memory)
            theta0 = param.detach().clone().to(self.device)
            self.theta0_params.append(theta0)
            self.ema_params.append(theta0.clone())  # initialize EMA with θ₀

    def _ema_beta(self, step: int) -> float:
        """Compute the EMA decay factor βₜ = (ρ + γ·t)⁻ᵏᵃᵖᵖᵃ, floored at `min_ema_multiplier`."""
        beta = (self.lag + self.multiplier * step) ** (-self.ema_power)
        return max(beta, self.min_ema_multiplier)

    def _bema_alpha(self, step: int) -> float:
        """Compute the BEMA scaling factor αₜ = (ρ + γ·t)⁻ᵉᵗᵃ."""
        return (self.lag + self.multiplier * step) ** (-self.bias_power)

    def _update_bema_weights(self, step: int):
        """Refresh `running_model` weights in place with the bias-corrected EMA at `step`."""
        beta = self._ema_beta(step)
        alpha = self._bema_alpha(step)
        # Compute EMA + BEMA in-place and write directly to running_model.
        # `running_params` is name-aligned with the trainable-parameter caches (see on_train_begin).
        for thetat, theta0, ema, run_param in zip(
            self.thetat_params, self.theta0_params, self.ema_params, self.running_params
        ):
            thetat = thetat.detach().to(self.device)
            ema.mul_(1 - beta).add_(thetat, alpha=beta)  # EMA update: ema = (1 - beta) * ema + beta * θₜ
            run_param.copy_(ema + alpha * (thetat - theta0))  # BEMA update: run_param = ema + alpha * (θₜ - θ₀)

    @torch.no_grad()
    def on_step_end(
        self, args: TrainingArguments, state: TrainerState, control: TrainerControl, model: PreTrainedModel, **kwargs
    ):
        step = state.global_step

        # If we haven't reached the update_after step, skip the BEMA update
        if step < self.update_after:
            return

        # Snapshot θ₀ and EMA at first update
        if step == self.update_after:
            for thetat_param, theta0_param, ema_param in zip(self.thetat_params, self.theta0_params, self.ema_params):
                theta0_param.copy_(thetat_param)
                ema_param.copy_(thetat_param)
        # Update BEMA weights every `update_freq` steps
        elif (step - self.update_after) % self.update_freq == 0:
            self._update_bema_weights(step)
            logger.info(f"Updated BEMA weights at step {step}")

    @torch.no_grad()
    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        """Persist the BEMA-averaged model (main process only)."""
        if state.is_world_process_zero:
            save_directory = f"{args.output_dir}/bema"
            self.running_model.save_pretrained(save_directory)
            logger.info(f"Saved BEMA model to {save_directory}")
| trl/trl/trainer/callbacks.py/0 | {
"file_path": "trl/trl/trainer/callbacks.py",
"repo_id": "trl",
"token_count": 13702
} | 603 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelConfig:
    """
    Configuration class for the models.

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        model_name_or_path (`str` or `None`, *optional*, defaults to `None`):
            Model checkpoint for weights initialization.
        model_revision (`str`, *optional*, defaults to `"main"`):
            Specific model version to use. It can be a branch name, a tag name, or a commit id.
        torch_dtype (`Literal["auto", "bfloat16", "float16", "float32"]` or `None`, *optional*, defaults to `None`):
            Override the default `torch.dtype` and load the model under this dtype. Possible values are

                - `"bfloat16"`: `torch.bfloat16`
                - `"float16"`: `torch.float16`
                - `"float32"`: `torch.float32`
                - `"auto"`: Automatically derive the dtype from the model's weights.

        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether to allow for custom models defined on the Hub in their own modeling files. This option should only
            be set to `True` for repositories you trust and in which you have read the code, as it will execute code
            present on the Hub on your local machine.
        attn_implementation (`str` or `None`, *optional*, defaults to `None`):
            Which attention implementation to use. You can run `--attn_implementation=flash_attention_2`, in which case
            you must install this manually by running `pip install flash-attn --no-build-isolation`.
        use_peft (`bool`, *optional*, defaults to `False`):
            Whether to use PEFT for training.
        lora_r (`int`, *optional*, defaults to `16`):
            LoRA R value.
        lora_alpha (`int`, *optional*, defaults to `32`):
            LoRA alpha.
        lora_dropout (`float`, *optional*, defaults to `0.05`):
            LoRA dropout.
        lora_target_modules (`Union[str, list[str]]` or `None`, *optional*, defaults to `None`):
            LoRA target modules.
        lora_target_parameters (`Union[str, list[str]]` or `None`, *optional*, defaults to `None`):
            List of target parameters for LoRA.
        lora_modules_to_save (`list[str]` or `None`, *optional*, defaults to `None`):
            Model layers to unfreeze & train.
        lora_task_type (`str`, *optional*, defaults to `"CAUSAL_LM"`):
            Task type to pass for LoRA (use `"SEQ_CLS"` for reward modeling).
        use_rslora (`bool`, *optional*, defaults to `False`):
            Whether to use Rank-Stabilized LoRA, which sets the adapter scaling factor to `lora_alpha/√r`, instead of
            the original default value of `lora_alpha/r`.
        use_dora (`bool`, *optional*, defaults to `False`):
            Enable [Weight-Decomposed Low-Rank Adaptation (DoRA)](https://huggingface.co/papers/2402.09353). This
            technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is
            handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can
            improve the performance of LoRA, especially at low ranks. Right now, DoRA only supports linear and Conv2D
            layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for
            inference.
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            Whether to use 8 bit precision for the base model. Works only with LoRA.
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            Whether to use 4 bit precision for the base model. Works only with LoRA.
        bnb_4bit_quant_type (`str`, *optional*, defaults to `"nf4"`):
            Quantization type (`"fp4"` or `"nf4"`).
        use_bnb_nested_quant (`bool`, *optional*, defaults to `False`):
            Whether to use nested quantization.
    """

    # -- Base model selection --
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Model checkpoint for weights initialization."}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "Specific model version to use. It can be a branch name, a tag name, or a commit id."},
    )
    torch_dtype: Optional[str] = field(
        default=None,
        metadata={
            "help": "Override the default `torch.dtype` and load the model under this dtype.",
            "choices": ["auto", "bfloat16", "float16", "float32"],
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": "Whether to allow for custom models defined on the Hub in their own modeling files. This option "
            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
            "execute code present on the Hub on your local machine."
        },
    )
    attn_implementation: Optional[str] = field(
        default=None,
        metadata={
            "help": "Which attention implementation to use. You can run `--attn_implementation=flash_attention_2`, in "
            "which case you must install this manually by running `pip install flash-attn --no-build-isolation`."
        },
    )

    # -- PEFT / LoRA --
    use_peft: bool = field(default=False, metadata={"help": "Whether to use PEFT for training."})
    lora_r: int = field(default=16, metadata={"help": "LoRA R value."})
    lora_alpha: int = field(default=32, metadata={"help": "LoRA alpha."})
    lora_dropout: float = field(default=0.05, metadata={"help": "LoRA dropout."})
    lora_target_modules: Optional[list[str]] = field(default=None, metadata={"help": "LoRA target modules."})
    lora_target_parameters: Optional[list[str]] = field(
        default=None, metadata={"help": "List of target parameters for LoRA."}
    )
    lora_modules_to_save: Optional[list[str]] = field(
        default=None, metadata={"help": "Model layers to unfreeze & train."}
    )
    lora_task_type: str = field(
        default="CAUSAL_LM", metadata={"help": "Task type to pass for LoRA (use 'SEQ_CLS' for reward modeling)."}
    )
    use_rslora: bool = field(
        default=False,
        metadata={
            "help": "Whether to use Rank-Stabilized LoRA, which sets the adapter scaling factor to `lora_alpha/√r`, "
            "instead of the original default value of `lora_alpha/r`."
        },
    )
    use_dora: bool = field(
        default=False,
        metadata={
            "help": "Enable Weight-Decomposed Low-Rank Adaptation (DoRA). This technique decomposes the updates of "
            "the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the "
            "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, "
            "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a "
            "bigger overhead than pure LoRA, so it is recommended to merge weights for inference."
        },
    )

    # -- Quantization (bitsandbytes) --
    load_in_8bit: bool = field(
        default=False,
        metadata={"help": "Whether to use 8 bit precision for the base model. Works only with LoRA."},
    )
    load_in_4bit: bool = field(
        default=False,
        metadata={"help": "Whether to use 4 bit precision for the base model. Works only with LoRA."},
    )
    bnb_4bit_quant_type: str = field(
        default="nf4", metadata={"help": "Quantization type.", "choices": ["fp4", "nf4"]}
    )
    use_bnb_nested_quant: bool = field(
        default=False, metadata={"help": "Whether to use nested quantization."}
    )

    def __post_init__(self):
        # 8-bit and 4-bit quantization are mutually exclusive.
        if self.load_in_8bit and self.load_in_4bit:
            raise ValueError("You can't use 8 bit and 4 bit precision at the same time")

        # A single-element module list collapses to its sole entry (a plain string).
        modules = self.lora_target_modules
        if hasattr(modules, "__len__") and len(modules) == 1:
            self.lora_target_modules = modules[0]
| trl/trl/trainer/model_config.py/0 | {
"file_path": "trl/trl/trainer/model_config.py",
"repo_id": "trl",
"token_count": 3494
} | 604 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import defaultdict
from collections.abc import Mapping
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Optional, TypeVar, Union
import torch
import torch.nn as nn
import transformers
from accelerate import PartialState, logging
from datasets import Dataset, IterableDataset
from transformers import (
AutoConfig,
AutoProcessor,
BaseImageProcessor,
DataCollator,
FeatureExtractionMixin,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
Trainer,
TrainingArguments,
is_wandb_available,
)
from transformers.data.data_collator import DataCollatorMixin
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalPrediction
from transformers.utils import is_peft_available
from ..data_utils import (
apply_chat_template,
is_conversational,
is_conversational_from_value,
maybe_convert_to_chatml,
pack_dataset,
prepare_multimodal_messages,
truncate_dataset,
)
from ..models import clone_chat_template, get_act_offloading_ctx_manager, prepare_peft_model
from .sft_config import SFTConfig
from .utils import entropy_from_logits, flush_left, generate_model_card, get_comet_experiment_url, pad
if is_peft_available():
from peft import PeftConfig, PeftModel
if is_wandb_available():
import wandb
logger = logging.get_logger(__name__)
TListOrMapping = TypeVar("TListOrMapping", list, Mapping)


def remove_none_values(example: TListOrMapping) -> TListOrMapping:
    """
    Recursively removes entries with `None` values from a nested structure (list or dictionary).

    Args:
        example (`list` or `Mapping`):
            Input nested structure (list or dictionary) from which to remove `None`.

    Example:
    ```python
    >>> [
    ...     {
    ...         "a": {"aa": None, "ab": 1},
    ...         "b": "my_string",
    ...     }
    ... ]
    >>> remove_none_values(example)
    [{'a': {'ab': 1}, 'b': 'my_string'}]
    ```
    """
    if isinstance(example, Mapping):
        # Dict branch: drop None-valued entries, recursing into nested containers.
        cleaned = {}
        for key, value in example.items():
            if value is None:
                continue
            cleaned[key] = remove_none_values(value) if isinstance(value, (dict, list)) else value
        return cleaned
    if isinstance(example, list):
        # List branch: elements themselves are kept (even None); only containers are recursed into.
        return [remove_none_values(item) if isinstance(item, (dict, list)) else item for item in example]
    raise TypeError("Input must be a list or a dictionary.")
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
    """
    Data collator used for language modeling data. Inputs are dynamically padded to the maximum length of a batch.

    This collator expects each example in the input list to be a dictionary containing at least the `"input_ids"` key.
    If the input contains a `"completion_mask"`, it is used to set the labels to `-100` for tokens that are not in the
    completion. If `"assistant_masks"` are present, they are used to set the labels to `-100` for tokens that are not
    in the assistant part of the sequence. The collator returns a dictionary containing the following keys:

    - `"input_ids"`: Tensor of input IDs, padded to the maximum length of the batch.
    - `"attention_mask"`: Tensor of attention mask, padded to the maximum length of the batch.
    - `"position_ids"`: Tensor of position IDs, padded to the maximum length of the batch.
    - `"labels"`: Tensor of labels, padded to the maximum length of the batch. If `completion_only_loss` is set to
      `True`, tokens that are not in the completion are set to -100. If `assistant_masks` are present, tokens that are
      not in the assistant part of the sequence are set to -100.

    Args:
        pad_token_id (`int`):
            Token ID to use for padding.
        completion_only_loss (`bool`, *optional*, defaults to `True`):
            When the input contains a completion mask (`completion_mask`), the labels are set to -100 for the tokens
            that are no in the completion.
        padding_free (`bool`, *optional*, defaults to `False`):
            If set to `True`, the sequences will be flattened into a single sequence, and the position IDs will be
            generated accordingly. The attention mask will be set to 1 for all tokens.
        return_position_ids (`bool`, *optional*, defaults to `True`):
            Whether to include `"position_ids"` in the collated output.
        pad_to_multiple_of (`int` or `None`, *optional*, defaults to `None`):
            If set, the sequences will be padded to a multiple of this value.
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            Type of Tensor to return. Only `"pt"` is currently supported.

    Examples:
    ```python
    >>> from trl import DataCollatorForLanguageModeling

    >>> collator = DataCollatorForLanguageModeling(pad_token_id=0)
    >>> examples = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
    >>> collator(examples)
    {'input_ids': tensor([[ 1,  2,  3],
                          [ 4,  5,  0]]),
     'attention_mask': tensor([[ 1, 1, 1],
                               [ 1, 1, 0]]),
     'position_ids': tensor([[0, 1, 2],
                             [0, 1, 0]]),
     'labels': tensor([[   1,    2,    3],
                       [   4,    5, -100]])}

    >>> # With completion mask
    >>> examples = [
    ...     {"input_ids": [1, 2, 3], "completion_mask": [0, 1, 1]},
    ...     {"input_ids": [4, 5], "completion_mask": [0, 1]},
    ... ]
    >>> collator(examples)
    {'input_ids': tensor([[ 1,  2,  3],
                          [ 4,  5,  0]]),
     'attention_mask': tensor([[ 1, 1, 1],
                               [ 1, 1, 0]]),
     'position_ids': tensor([[0, 1, 2],
                             [0, 1, 0]]),
     'labels': tensor([[-100,    2,    3],
                       [-100,    5, -100]])}

    >>> # With padding_free
    >>> collator = DataCollatorForLanguageModeling(pad_token_id=0, padding_free=True)
    >>> collator(examples)
    {'input_ids': tensor([[ 1, 2, 3, 4, 5]]),
     'attention_mask': tensor([[1, 1, 1, 1, 1]]),
     'position_ids': tensor([[0, 1, 2, 0, 1]]),
     'labels': tensor([[1, 2, 3, 4, 5]])}
    ```
    """

    # Token ID used to right-pad `input_ids`.
    pad_token_id: int
    # If True and a `completion_mask` is present, non-completion labels become -100.
    completion_only_loss: bool = True
    # If True, flatten the batch into a single row instead of padding.
    padding_free: bool = False
    # If True, emit `position_ids` in the output.
    return_position_ids: bool = True
    # Optional alignment for padded lengths (e.g. 8 for tensor cores).
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
        """Collate a list of feature dicts into a padded (or flattened) tensor batch.

        NOTE(review): all examples in a batch are assumed to carry the same optional keys
        (`seq_lengths`, `labels`, `completion_mask`, `assistant_masks`) — only `examples[0]`
        is inspected; confirm upstream processing guarantees this.
        """
        # Convert to tensor
        input_ids = [torch.tensor(example["input_ids"]) for example in examples]

        # Check if we have meaningful seq_lengths from packing (restarting sequences)
        has_packed_position_ids = self.return_position_ids and "seq_lengths" in examples[0] and self.padding_free

        # For packing with position_ids, we should NOT create attention_mask as it causes
        # FlashAttention to ignore position_ids and compute wrong cu_seq_lens from the all-1s mask
        if not has_packed_position_ids:
            attention_mask = [torch.ones_like(input_ids) for input_ids in input_ids]

        if self.return_position_ids:
            if "seq_lengths" in examples[0]:
                # Packed rows: restart positions at each document boundary.
                position_ids = self.get_position_ids_from_packed_seq_lengths(
                    [example["seq_lengths"] for example in examples]
                )
            else:
                position_ids = [torch.arange(len(ids)) for ids in input_ids]
        if "labels" in examples[0]:
            labels = [torch.tensor(example["labels"]) for example in examples]
        else:
            # Default to next-token prediction: labels mirror input_ids (shift happens in the model).
            labels = [torch.tensor(example["input_ids"]) for example in examples]
        if self.completion_only_loss and "completion_mask" in examples[0]:
            completion_mask = [torch.tensor(example["completion_mask"]) for example in examples]
        if "assistant_masks" in examples[0]:
            assistant_masks = [torch.tensor(example["assistant_masks"]) for example in examples]

        # Pad
        output = {}
        if self.padding_free:
            # Flatten the whole batch into a single row (batch size 1); masking is applied
            # after concatenation so indices line up with the flattened labels.
            output["input_ids"] = torch.cat(input_ids, dim=0).unsqueeze(0)
            if not has_packed_position_ids:
                output["attention_mask"] = torch.cat(attention_mask, dim=0).unsqueeze(0)
            if self.return_position_ids:
                output["position_ids"] = torch.cat(position_ids, dim=0).unsqueeze(0)
            output["labels"] = torch.cat(labels, dim=0).unsqueeze(0)
            if self.completion_only_loss and "completion_mask" in examples[0]:
                completion_mask = torch.cat(completion_mask, dim=0).unsqueeze(0)
                output["labels"][completion_mask == 0] = -100
            if "assistant_masks" in examples[0]:
                assistant_masks = torch.cat(assistant_masks, dim=0).unsqueeze(0)
                output["labels"][assistant_masks == 0] = -100
        else:
            output["input_ids"] = pad(
                input_ids,
                padding_value=self.pad_token_id,
                padding_side="right",
                pad_to_multiple_of=self.pad_to_multiple_of,
            )
            output["attention_mask"] = pad(
                attention_mask, padding_value=0, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
            )
            if self.return_position_ids:
                output["position_ids"] = pad(
                    position_ids, padding_value=0, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
                )
            output["labels"] = pad(
                labels, padding_value=-100, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
            )
            if self.completion_only_loss and "completion_mask" in examples[0]:
                completion_mask = pad(
                    completion_mask, padding_value=0, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
                )
                output["labels"][completion_mask == 0] = -100  # mask everything that is not in the completion
            if "assistant_masks" in examples[0]:
                assistant_masks = pad(
                    assistant_masks, padding_value=0, padding_side="right", pad_to_multiple_of=self.pad_to_multiple_of
                )
                output["labels"][assistant_masks == 0] = -100
        return output

    @staticmethod
    def get_position_ids_from_packed_seq_lengths(batch_seq_lengths: list[list[int]]) -> list[torch.Tensor]:
        """
        Get position IDs for packed sequences.

        Args:
            batch_seq_lengths (`list[list[int]]`):
                A list of lists containing the lengths of each individual document in the packed batch.

        Return:
            `list[torch.Tensor]`:
                A list of tensors containing the position IDs for each packed sequence.
        """
        # Get lengths per row
        example_lengths = [sum(seq_lengths) for seq_lengths in batch_seq_lengths]
        # Flat list of lengths
        batch_seq_lengths = torch.tensor(
            [seq_length for seq_lengths in batch_seq_lengths for seq_length in seq_lengths]
        )
        position_ids = torch.ones(sum(example_lengths), dtype=batch_seq_lengths.dtype)
        position_ids[0] = 0
        # Reset position ids to 0 at the start of each sequence: writing a negative step at each
        # document boundary makes the running cumsum below restart from 0 there.
        position_ids[batch_seq_lengths[:-1].cumsum(0)] = -(batch_seq_lengths[:-1] - 1)
        position_ids = position_ids.cumsum(0)
        # Split back into one tensor per example
        return list(position_ids.split(example_lengths))
@dataclass
class DataCollatorForVisionLanguageModeling(DataCollatorMixin):
    """
    Data collator for vision-language modeling tasks.

    Unlike text-only datasets—where the collator typically receives pre-tokenized inputs ready for batching,
    vision-language data processing involves converting images into pixel values. This conversion is disk-intensive,
    making upfront preprocessing of the entire dataset impractical. Therefore, this collator performs tokenization and
    image processing on-the-fly to efficiently prepare batches.

    Each input example should be a dictionary containing at least:

    - An `"images"` key holding the image data.
    - [language modeling](#language-modeling) type: either a `"messages"` key for conversational inputs or a `"text"`
      key for standard text inputs.
    - [prompt-completion](#prompt-completion) type: keys `"prompt"` and `"completion"` for the prompt and completion.

    The collator outputs a dictionary including:

    - `"input_ids"`: Tensor of token IDs.
    - `"attention_mask"`: Tensor indicating attention mask.
    - `"pixel_values"`: Tensor representing image pixel values.
    - `"labels"`: Tensor for training labels.

    Additional keys may be present depending on the processor, such as `"image_grid_thw"`.

    Args:
        processor (`ProcessorMixin`):
            The processor used to tokenize text and process images. It must be a subclass of `ProcessorMixin` and
            include a `tokenizer` with a defined `pad_token_id`.
        max_length (`int` or `None`, optional, defaults to `None`):
            Maximum sequence length for input tokens. If `None`, no truncation is applied.
        completion_only_loss (`bool`, *optional*, defaults to `False`):
            Whether to compute loss only on the completion part of the sequence. When `True`, the labels for the prompt
            part are set to -100. It requires the dataset type to be prompt-completion.
        pad_to_multiple_of (`int` or `None`, optional, defaults to `None`):
            If set, the sequences will be padded to a multiple of this value.
        dataset_text_field (`str`, optional, defaults to `"text"`):
            Name of the column that contains text data in the dataset. This parameter is only relevant for [standard
            datasets format](dataset_formats#standard).
        return_tensors (`str`, optional, defaults to `"pt"`):
            The tensor type to return. Currently, only `"pt"` (PyTorch tensors) is supported.

    Example:
    ```python
    >>> from trl import DataCollatorForVisionLanguageModeling
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
    >>> collator = DataCollatorForVisionLanguageModeling(processor)
    >>> examples = [
    ...     {"images": [Image.open("image_0.png")], "messages": [{"role": "user", "content": "What is this?"}]},
    ...     {"images": [Image.open("image_1.png")], "messages": [{"role": "user", "content": "Describe this image."}]},
    ... ]
    >>> collator(examples)
    {'input_ids': tensor([[151644,   8948,    198,   2610,    525,    264,  10950,  17847,     13, 151645,    198,
                           151644,    872,    198, 151652, 151655, 151655, 151655, 151655, 151653,   3838,    374,
                              419,     30, 151645,    198],
                          [151644,   8948,    198,   2610,    525,    264,  10950,  17847,     13, 151645,    198,
                           151644,    872,    198, 151652, 151655, 151655, 151655, 151655, 151653,  74785,    419,
                             2168,     13, 151645,    198]]),
     'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]),
     'pixel_values': tensor([[-0.9893,  0.1785,  1.5362,  ..., -0.0582,  0.8661, -0.2431],
                             [-0.2302,  0.9522, -1.1061,  ...,  0.0555,  1.3354, -0.6412],
                             [ 1.2150,  0.9084,  0.7041,  ...,  0.2404, -0.8403, -0.5133],
                             ...,
                             [ 0.6895,  0.2807,  0.2515,  ..., -0.2004, -1.2100,  0.0555],
                             [ 0.8209, -0.9748,  1.5654,  ...,  1.6055, -0.4706,  0.5817],
                             [-1.0915,  0.4559,  0.9230,  ...,  0.5106,  0.0982, -0.1720]]),
     'image_grid_thw': tensor([[1, 4, 4],
                               [1, 4, 4]]),
     'labels': tensor([[151644,   8948,    198,   2610,    525,    264,  10950,  17847,     13, 151645,    198,
                        151644,    872,    198, 151652, 151655, 151655, 151655, 151655, 151653,   3838,    374,
                           419,     30, 151645,    198],
                       [151644,   8948,    198,   2610,    525,    264,  10950,  17847,     13, 151645,    198,
                        151644,    872,    198, 151652, 151655, 151655, 151655, 151655, 151653,  74785,    419,
                          2168,     13, 151645,    198]])}
    ```
    """

    processor: ProcessorMixin
    max_length: Optional[int] = None
    completion_only_loss: bool = False  # default not used in practice; SFTTrainer always passes the relevant value
    pad_to_multiple_of: Optional[int] = None
    dataset_text_field: str = "text"
    return_tensors: str = "pt"

    def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
        """
        Dispatch a batch of raw examples to the appropriate collation routine.

        Language modeling examples (containing `"messages"` or the configured text field) go to
        `_collate_language_modeling`; examples with both `"prompt"` and `"completion"` go to
        `_collate_prompt_completion`. Anything else raises a `KeyError`.
        """
        if "messages" in examples[0] or self.dataset_text_field in examples[0]:
            if self.completion_only_loss:
                raise ValueError(
                    "The `completion_only_loss` argument is not supported for language modeling datasets."
                )
            return self._collate_language_modeling(examples)
        elif "prompt" in examples[0] and "completion" in examples[0]:
            return self._collate_prompt_completion(examples)
        else:
            raise KeyError(f"Unexpected input keys in examples: {list(examples[0].keys())}.")

    def _collate_language_modeling(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
        """
        Collate language-modeling examples: apply the chat template (conversational case) or use the raw
        text field (standard case), run the processor on text+images, and derive labels from `input_ids`
        with padding positions masked to -100.
        """
        images = [example["images"] for example in examples]

        if "messages" in examples[0]:  # conversational case
            for example in examples:
                prepare_multimodal_messages(example["messages"], len(example["images"]))
            messages = [example["messages"] for example in examples]
            texts = self.processor.apply_chat_template(messages)
        elif self.dataset_text_field in examples[0]:  # standard case
            texts = [example[self.dataset_text_field] for example in examples]
        else:
            raise KeyError(
                "The input examples must contain either 'messages' for conversational data or 'text' for standard "
                "data."
            )

        output = self.processor(
            images=images,
            text=texts,
            padding=True,
            padding_side="right",
            pad_to_multiple_of=self.pad_to_multiple_of,
            truncation=self.max_length is not None,
            max_length=self.max_length,
            return_tensors=self.return_tensors,
            add_special_tokens=False,  # to avoid adding the BOS, twice see https://huggingface.co/blog/qgallouedec/gotchas-in-tokenizer-behavior#7-chat-template-and-tokenization-dont-compose-due-to-special-tokens
        )
        labels = output["input_ids"].clone()
        labels[output["attention_mask"] == 0] = -100
        # We mask only padding tokens (-100) in the labels. Vision tokens are left unchanged because their handling in
        # loss computation has to be done by the model, and masking them here would be infeasible in practice as vision
        # token definitions vary across architectures.
        output["labels"] = labels
        return output

    def _collate_prompt_completion(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
        """
        Collate prompt-completion examples: process prompts (with images, left-padded) and completions
        (right-padded) separately, concatenate them, flush padding to the left, truncate to `max_length`,
        and build labels with padding (and optionally the prompt part) masked to -100.
        """
        if self.pad_to_multiple_of is not None:
            raise NotImplementedError(
                "Padding to a multiple of a value is not yet implemented for vision-language modeling and "
                "prompt-completion data."
            )
        images = [example["images"] for example in examples]
        if is_conversational(examples[0]):  # conversational case
            for example in examples:
                prepare_multimodal_messages(example["prompt"] + example["completion"], len(example["images"]))
            examples = [apply_chat_template(example, self.processor) for example in examples]

        prompts = [example["prompt"] for example in examples]
        completions = [example["completion"] for example in examples]

        processed_prompts = self.processor(
            images=images,
            text=prompts,
            padding=True,
            padding_side="left",
            return_tensors=self.return_tensors,
            add_special_tokens=False,  # to avoid adding the BOS, twice see https://huggingface.co/blog/qgallouedec/gotchas-in-tokenizer-behavior#7-chat-template-and-tokenization-dont-compose-due-to-special-tokens
        )
        processed_completions = self.processor(
            text=completions,
            padding=True,
            padding_side="right",
            return_tensors=self.return_tensors,
            add_special_tokens=False,  # to avoid adding the BOS, twice see https://huggingface.co/blog/qgallouedec/gotchas-in-tokenizer-behavior#7-chat-template-and-tokenization-dont-compose-due-to-special-tokens
        )

        # Concatenate prompts and completions
        prompt_ids, completion_ids = processed_prompts["input_ids"], processed_completions["input_ids"]
        prompt_mask, completion_mask = processed_prompts["attention_mask"], processed_completions["attention_mask"]
        input_ids = torch.cat((prompt_ids, completion_ids), dim=1)
        attention_mask = torch.cat((prompt_mask, completion_mask), dim=1)
        completion_mask = torch.cat((torch.zeros_like(prompt_mask), completion_mask), dim=1)

        # Flush left to reduce padding
        attention_mask, input_ids, completion_mask = flush_left(attention_mask, input_ids, completion_mask)

        # Truncate if necessary
        if self.max_length is not None:
            input_ids = input_ids[:, : self.max_length]
            attention_mask = attention_mask[:, : self.max_length]
            completion_mask = completion_mask[:, : self.max_length]

        # Create labels and mask padding tokens
        labels = input_ids.clone()
        labels[attention_mask == 0] = -100
        if self.completion_only_loss:
            labels[completion_mask == 0] = -100

        # Build the output dictionary
        output = processed_prompts  # we take processed_prompts because it contains the images
        output["input_ids"] = input_ids
        output["attention_mask"] = attention_mask
        output["labels"] = labels
        return output
class SFTTrainer(Trainer):
"""
Trainer for Supervised Fine-Tuning (SFT) method.
This class is a wrapper around the [`transformers.Trainer`] class and inherits all of its attributes and methods.
Example:
```python
from datasets import load_dataset
from trl import SFTTrainer
dataset = load_dataset("roneneldan/TinyStories", split="train[:1%]")
trainer = SFTTrainer(model="Qwen/Qwen2-0.5B-Instruct", train_dataset=dataset)
trainer.train()
```
Args:
model (`Union[str, PreTrainedModel]`):
Model to be trained. Can be either:
- A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or a
path to a *directory* containing model weights saved using
[`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded
using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments in
`args.model_init_kwargs`.
- A [`~transformers.PreTrainedModel`] object. Only causal language models are supported.
args ([`SFTConfig`], *optional*, defaults to `None`):
Configuration for this trainer. If `None`, a default configuration is used.
data_collator (`DataCollator`, *optional*):
Function to use to form a batch from a list of elements of the processed `train_dataset` or `eval_dataset`.
Will default to a custom [`DataCollatorForLanguageModeling`].
train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
Dataset to use for training. SFT supports both [language modeling](#language-modeling) type and
[prompt-completion](#prompt-completion) type. The format of the samples can be either:
- [Standard](dataset_formats#standard): Each sample contains plain text.
- [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role
and content).
The trainer also supports processed datasets (tokenized) as long as they contain an `input_ids` field.
eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset,
IterableDataset]]`):
Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.ProcessorMixin`] or `None`, *optional*, defaults to `None`):
Processing class used to process the data. If `None`, the processing class is loaded from the model's name
with [`~transformers.AutoProcessor.from_pretrained`]. A padding token, `tokenizer.pad_token`, must be set.
If the processing class has not set a padding token, `tokenizer.eos_token` will be used as the default.
callbacks (list of [`~transformers.TrainerCallback`], *optional*, defaults to `None`):
List of callbacks to customize the training loop. Will add those to the list of default callbacks detailed
in [here](https://huggingface.co/docs/transformers/main_classes/callback).
If you want to remove one of the default callbacks used, use the [`~transformers.Trainer.remove_callback`]
method.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None,
None)`):
A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
optimizer_cls_and_kwargs (`Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*, defaults to
`None`):
A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args` in
`args`. Incompatible with the `optimizers` argument.
Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before
initializing the Trainer.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*, defaults to
`None`):
A function that preprocess the logits right before caching them at each evaluation step. Must take two
tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
by this function will be reflected in the predictions received by `compute_metrics`.
Note that the labels (second parameter) will be `None` if the dataset does not have them.
peft_config ([`~peft.PeftConfig`], *optional*, defaults to `None`):
PEFT configuration used to wrap the model. If `None`, the model is not wrapped.
formatting_func (`Optional[Callable]`):
Formatting function applied to the dataset before tokenization. Applying the formatting function explicitly
converts the dataset into a [language modeling](#language-modeling) type.
"""
_tag_names = ["trl", "sft"]
    def __init__(
        self,
        model: Union[str, nn.Module, PreTrainedModel],
        args: Optional[Union[SFTConfig, TrainingArguments]] = None,
        data_collator: Optional[DataCollator] = None,  # type: ignore
        train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[Union[PreTrainedTokenizerBase, ProcessorMixin]] = None,
        compute_loss_func: Optional[Callable] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None),
        optimizer_cls_and_kwargs: Optional[tuple[type[torch.optim.Optimizer], dict[str, Any]]] = None,
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        peft_config: Optional["PeftConfig"] = None,
        formatting_func: Optional[Callable[[dict], str]] = None,
    ):
        """
        Initialize the SFT trainer.

        Resolves the config (`args`), loads the model if given as a string, resolves the processing
        class and its EOS/pad tokens, optionally applies a chat template and PEFT wrapping, selects
        the data collator (text-only vs vision-language), prepares the datasets, and finally
        delegates to `transformers.Trainer.__init__`. See the class docstring for argument details.
        """
        # Args: build or coerce the SFTConfig from whatever the caller passed
        if args is None:
            model_name = model if isinstance(model, str) else model.config._name_or_path
            model_name = model_name.split("/")[-1]
            args = SFTConfig(f"{model_name}-SFT")
        elif isinstance(args, TrainingArguments) and not isinstance(args, SFTConfig):
            dict_args = args.to_dict()
            dict_args["hub_token"] = args.hub_token  # to_dict hides the hub_token
            dict_args.pop("push_to_hub_token")
            args = SFTConfig(**dict_args)
        # Model: load from the Hub/path when given a string, otherwise use the instance as-is
        model_init_kwargs = args.model_init_kwargs or {}
        if isinstance(model, str):
            model_id = model
            torch_dtype = model_init_kwargs.get("torch_dtype")
            if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None:
                pass  # torch_dtype is already a torch.dtype or "auto" or None
            elif isinstance(torch_dtype, str) and torch_dtype in ["bfloat16", "float16", "float32"]:
                # Convert the string name (e.g. "float16") to the actual torch.dtype object
                torch_dtype = getattr(torch, torch_dtype)
                model_init_kwargs["torch_dtype"] = torch_dtype
            else:
                raise ValueError(
                    "Invalid `torch_dtype` passed to `SFTConfig`. Expected either 'auto' or a string representing "
                    f"a valid `torch.dtype` (e.g., 'float32'), but got {torch_dtype}."
                )
            # Instantiate via the architecture class recorded in the model config (supports any
            # architecture exposed by `transformers`, not just causal LM)
            config = AutoConfig.from_pretrained(model_id)
            architecture = getattr(transformers, config.architectures[0])
            model = architecture.from_pretrained(model_id, **model_init_kwargs)
        else:
            model_id = model.config._name_or_path
            if args.model_init_kwargs is not None:
                logger.warning(
                    "You passed `model_init_kwargs` to the `SFTConfig`, but your model is already instantiated. "
                    "The `model_init_kwargs` will be ignored."
                )
        # Processing class
        if processing_class is None:
            processing_class = AutoProcessor.from_pretrained(model_id)
        # Handle pad token for processors or tokenizers
        if isinstance(processing_class, ProcessorMixin):
            tokenizer = processing_class.tokenizer
            self._is_vlm = True
        elif isinstance(processing_class, PreTrainedTokenizerBase):
            tokenizer = processing_class
            self._is_vlm = False
        else:
            raise TypeError("The `processing_class` must be either a `PreTrainedTokenizerBase` or a `ProcessorMixin`")
        # Optionally override the tokenizer's EOS token id with a caller-specified token
        if args.eos_token is not None:
            eos_token = args.eos_token
            eos_token_id = tokenizer.convert_tokens_to_ids(eos_token)
            if eos_token_id is None:
                raise ValueError(
                    f"The specified `eos_token` ('{eos_token}') is not found in the vocabulary of the given "
                    f"`processing_class` ({processing_class.__class__.__name__}). Ensure that the `eos_token` exists "
                    "in the vocabulary before using it as an EOS token."
                )
            tokenizer.eos_token_id = eos_token_id
        # Chat template: either read a Jinja template file directly, or clone it (together with any
        # tokens it requires, returned as `added_tokens`) from another model's processing class
        if args.chat_template_path is not None:
            if os.path.isfile(args.chat_template_path) and args.chat_template_path.endswith((".jinja", ".j2")):
                with open(args.chat_template_path, encoding="utf-8") as chat_template_file:
                    processing_class.chat_template = chat_template_file.read()
                added_tokens = []
            else:
                model, processing_class, added_tokens = clone_chat_template(
                    model, processing_class, args.chat_template_path
                )
        else:
            added_tokens = []
        # Catch some wrong configurations related to VLMs
        if self._is_vlm and args.packing:
            raise ValueError(
                "Packing is not supported for vision-language models. Please set `packing=False` in the SFTConfig."
            )
        if self._is_vlm and args.padding_free:
            raise ValueError(
                "Padding-free training is yet not supported for vision-language models. Please set "
                "`padding_free=False` in the `SFTConfig`."
            )
        if self._is_vlm and args.assistant_only_loss:
            raise ValueError(
                "Assistant-only loss is not yet supported for vision-language models. Please set "
                "`assistant_only_loss=False` in the `SFTConfig`."
            )
        # PEFT configuration and model wrapping
        if peft_config is not None:
            if added_tokens:
                # Ensure that the added tokens are trainable
                if peft_config.trainable_token_indices is None:
                    peft_config.trainable_token_indices = {"embed_tokens": added_tokens}
                elif "embed_tokens" not in peft_config.trainable_token_indices:
                    peft_config.trainable_token_indices["embed_tokens"] = added_tokens
                else:
                    peft_config.trainable_token_indices["embed_tokens"].extend(added_tokens)
                # Ensure that the lm_head is trainable
                if peft_config.modules_to_save is None or "lm_head" not in peft_config.modules_to_save:
                    logger.warning(
                        "Cloning chat template added new tokens to the tokenizer, but 'lm_head' is not in PEFT's "
                        "`modules_to_save`. As a result, the model may not learn to generate outputs with these new "
                        "tokens, leading to degraded generation quality. To fix this, add "
                        "`modules_to_save=['lm_head']` to your PEFT configuration."
                    )
                    if peft_config.modules_to_save is None:
                        peft_config.modules_to_save = ["lm_head"]
                    else:
                        peft_config.modules_to_save.append("lm_head")
        # In Prompt Tuning a small set of trainable virtual tokens (continuous prompt embeddings) is prepended to the
        # input. We store the number of these tokens so we can account for them correctly when calculating accuracy.
        self.num_virtual_tokens = 0
        if peft_config is not None or (is_peft_available() and isinstance(model, PeftModel)):
            model = prepare_peft_model(model, peft_config, args)
            if model.active_adapter in model.peft_config:
                peft_model_config = model.peft_config[model.active_adapter]
                self.num_virtual_tokens = getattr(peft_model_config, "num_virtual_tokens", 0)
        # Data collator
        # BFD packing requires padding-free mode; otherwise, the collator outputs padded attention masks, causing
        # FlashAttention to ignore position_ids and recompute them incorrectly from the padded attention mask.
        self.padding_free = args.padding_free or (args.packing and args.packing_strategy == "bfd")
        use_flash_attention = model.config._attn_implementation in [
            "flash_attention_2",
            "kernels-community/vllm-flash-attn3",
        ]
        if self.padding_free:
            if data_collator is not None:
                raise ValueError("Passing a custom data collator is not supported when using padding-free.")
            if args.packing and args.packing_strategy == "wrapped":
                logger.warning(
                    "You are passing `padding_free=True` with the 'wrapped' packing strategy, which is not "
                    "recommended. Please refer to the documentation to understand why this is not recommended."
                )
            if not use_flash_attention:
                logger.warning(
                    "Padding-free training is enabled, but the attention implementation is not set to "
                    "'flash_attention_2'. Padding-free training flattens batches into a single sequence, and "
                    "'flash_attention_2' is the only known attention mechanism that reliably supports this. Using "
                    "other implementations may lead to unexpected behavior. To ensure compatibility, set "
                    "`attn_implementation='flash_attention_2'` in the model configuration, or verify that your "
                    "attention mechanism can handle flattened sequences."
                )
            if args.per_device_train_batch_size == 1 and not args.packing:
                logger.warning(
                    "You are using a per_device_train_batch_size of 1 with padding-free training. Using a batch size "
                    "of 1 anihilate the benefits of padding-free training. Please consider increasing the batch size "
                    "to at least 2."
                )
        # Decide whether to use completion-only loss: if not specified, then it is set to True if the dataset format
        # is prompt-completion, and False if the dataset format is language modeling.
        dataset_sample = next(iter(train_dataset))
        if args.completion_only_loss is None:
            self.completion_only_loss = "prompt" in dataset_sample and "completion" in dataset_sample
        else:
            self.completion_only_loss = args.completion_only_loss
        if data_collator is None and not self._is_vlm:
            # Get the pad token: if not provided, use the one from the processing class or the eos token
            # if the processing class does not have a pad token.
            pad_token = args.pad_token or tokenizer.pad_token or tokenizer.eos_token
            pad_token_id = tokenizer.convert_tokens_to_ids(pad_token)
            if pad_token_id is None:
                raise ValueError(
                    f"The specified `pad_token` ('{pad_token}') is not found in the vocabulary of the given "
                    f"`processing_class` ({processing_class.__class__.__name__}). Ensure that the `pad_token` exists "
                    "in the vocabulary before using it as a padding token."
                )
            data_collator = DataCollatorForLanguageModeling(
                pad_token_id=pad_token_id,
                completion_only_loss=self.completion_only_loss,
                padding_free=self.padding_free,
                # Using position_ids without flash_attn hurts the training
                return_position_ids=use_flash_attention,
                pad_to_multiple_of=args.pad_to_multiple_of,
            )
        elif data_collator is None and self._is_vlm:
            data_collator = DataCollatorForVisionLanguageModeling(
                processor=processing_class,
                max_length=args.max_length,
                completion_only_loss=self.completion_only_loss,
                pad_to_multiple_of=args.pad_to_multiple_of,
                dataset_text_field=args.dataset_text_field,
            )
        if args.packing and args.packing_strategy == "bfd" and not use_flash_attention:
            logger.warning(
                "You are using packing, but the attention implementation is not set to 'flash_attention_2' or "
                "'kernels-community/vllm-flash-attn3'. Packing flattens batches into a single sequence, and Flash "
                "Attention is the only known attention mechanisms that reliably support this. Using other "
                "implementations may lead to cross-contamination between batches. To avoid this, either disable "
                "packing by setting `packing=False`, or set `attn_implementation='flash_attention_2'` or "
                "`attn_implementation='kernels-community/vllm-flash-attn3'` in the model configuration."
            )
        if args.assistant_only_loss and not is_conversational(dataset_sample):
            raise ValueError(
                "You set `assistant_only_loss=True`, but the dataset is not conversational. This option is only "
                "supported for conversational datasets."
            )
        # Dataset
        # Skip dataset preparation if `skip_prepare_dataset=True` in `dataset_kwargs`, or if it's a VLM, where
        # preprocessing (e.g., image-to-pixel conversion) is too costly and done on the fly instead.
        skip_prepare_dataset = (
            args.dataset_kwargs is not None and args.dataset_kwargs.get("skip_prepare_dataset", False) or self._is_vlm
        )
        if not skip_prepare_dataset:
            if self.completion_only_loss and formatting_func:
                raise ValueError(
                    "A formatting function was provided while `completion_only_loss=True`, which is incompatible. "
                    "Using a formatter converts the dataset to a language modeling type, conflicting with "
                    "completion-only loss. To resolve this, apply your formatting function before passing the "
                    "dataset, or disable `completion_only_loss` in `SFTConfig`."
                )
            train_dataset = self._prepare_dataset(
                train_dataset, processing_class, args, args.packing, formatting_func, "train"
            )
            if eval_dataset is not None:
                # `eval_packing` overrides `packing` for evaluation when explicitly set
                packing = args.packing if args.eval_packing is None else args.eval_packing
                if isinstance(eval_dataset, dict):
                    eval_dataset = {
                        key: self._prepare_dataset(dataset, processing_class, args, packing, formatting_func, key)
                        for key, dataset in eval_dataset.items()
                    }
                else:
                    eval_dataset = self._prepare_dataset(
                        eval_dataset, processing_class, args, packing, formatting_func, "eval"
                    )
        # Initialize the metrics
        self._metrics = {"train": defaultdict(list), "eval": defaultdict(list)}
        self._total_train_tokens = 0
        # Initialize the Trainer. Parent class will handle:
        # - DeepSpeed configuration (through create_accelerator_and_postprocess)
        # - FSDP setup
        # - Distributed training setup
        # - Optimizer and scheduler creation
        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            compute_loss_func=compute_loss_func,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            optimizer_cls_and_kwargs=optimizer_cls_and_kwargs,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )
        # Initialize activation offloading context
        if self.args.activation_offloading:
            self.maybe_activation_offload_context = get_act_offloading_ctx_manager(model=self.model)
        else:
            self.maybe_activation_offload_context = contextlib.nullcontext()
        # Add tags for models that have been loaded with the correct transformers version
        if hasattr(self.model, "add_model_tags"):
            self.model.add_model_tags(self._tag_names)
def _prepare_dataset(
self,
dataset: Union[Dataset, IterableDataset],
processing_class: Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin],
args: SFTConfig,
packing: bool,
formatting_func: Optional[Callable[[dict], str]],
dataset_name: str,
) -> Union[Dataset, IterableDataset]:
# Tabular backends like Arrow/Parquet insert `None` for mismatched keys in nested structures. Clean them from
# sampled data.
if isinstance(dataset, Dataset): # IterableDataset does not support `with_transform`
dataset = dataset.with_transform(remove_none_values)
# If the dataset is already preprocessed (tokenized), skip the processing steps.
column_names = list(next(iter(dataset)).keys())
is_processed = "input_ids" in column_names
# Build the kwargs for the `map` function
map_kwargs = {}
if isinstance(dataset, Dataset): # IterableDataset does not support num_proc
map_kwargs["num_proc"] = args.dataset_num_proc
with PartialState().main_process_first():
# Apply the formatting function if any
if formatting_func is not None and is_processed:
logger.warning(
"You passed a dataset that is already processed (contains an `input_ids` field) together with a "
"formatting function. Therefore `formatting_func` will be ignored. Either remove the "
"`formatting_func` or pass a dataset that is not already processed.",
)
if formatting_func is not None and not is_processed:
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Applying formatting function to {dataset_name} dataset"
def _func(example):
return {"text": formatting_func(example)}
dataset = dataset.map(_func, batched=False, **map_kwargs)
if not is_processed:
# Convert the dataset to ChatML if needed
first_example = next(iter(dataset))
if is_conversational_from_value(first_example):
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Converting {dataset_name} dataset to ChatML"
column_names = next(iter(dataset)).keys()
dataset = dataset.map(
maybe_convert_to_chatml,
remove_columns="conversations" if "conversations" in column_names else None,
**map_kwargs,
)
# Apply the chat template if needed
first_example = next(iter(dataset))
if not is_conversational(first_example):
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Adding EOS to {dataset_name} dataset"
def add_eos(example, eos_token):
if "text" in example and not example["text"].endswith(eos_token): # language modeling case
example["text"] = example["text"] + eos_token
elif "completion" in example and not example["completion"].endswith(eos_token):
example["completion"] = example["completion"] + eos_token
return example
dataset = dataset.map(
add_eos,
fn_kwargs={"eos_token": processing_class.eos_token},
remove_columns="messages" if "messages" in column_names else None, # renamed to "text"
**map_kwargs,
)
# Tokenize the dataset
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Tokenizing {dataset_name} dataset"
def tokenize(example, processing_class, dataset_text_field, assistant_only_loss):
if "prompt" in example: # prompt-completion case
output = {}
if is_conversational(example):
prompt_ids = processing_class.apply_chat_template(
example["prompt"],
tools=example.get("tools"),
**example.get("chat_template_kwargs", {}),
)
prompt_completion_processed = processing_class.apply_chat_template(
example["prompt"] + example["completion"],
return_dict=True,
return_assistant_tokens_mask=assistant_only_loss,
tools=example.get("tools"),
**example.get("chat_template_kwargs", {}),
)
prompt_completion_ids = prompt_completion_processed["input_ids"]
if "assistant_masks" in prompt_completion_processed:
output["assistant_masks"] = prompt_completion_processed["assistant_masks"]
else:
prompt_ids = processing_class(text=example["prompt"])["input_ids"]
prompt_completion_ids = processing_class(text=example["prompt"] + example["completion"])[
"input_ids"
]
# Check if the tokenized prompt starts with the tokenized prompt+completion
if not prompt_completion_ids[: len(prompt_ids)] == prompt_ids:
logger.warning(
"Mismatch between tokenized prompt and the start of tokenized prompt+completion. "
"This may be due to unexpected tokenizer behavior, whitespace issues, or special "
"token handling. Verify that the tokenizer is processing text consistently."
)
# Create a completion mask
completion_mask = [0] * len(prompt_ids) + [1] * (len(prompt_completion_ids) - len(prompt_ids))
output["input_ids"] = prompt_completion_ids
output["completion_mask"] = completion_mask
else: # language modeling case
if is_conversational(example):
processed = processing_class.apply_chat_template(
example["messages"],
return_dict=True,
return_assistant_tokens_mask=assistant_only_loss,
tools=example.get("tools"),
**example.get("chat_template_kwargs", {}),
)
if "assistant_masks" in processed and 1 not in processed["assistant_masks"]:
raise RuntimeError(
"You're using `assistant_only_loss=True`, but at least one example has no "
"assistant tokens. This usually means the tokenizer's chat template doesn't "
"generate assistant masks — it may be missing the `{% generation %}` keyword. Please "
"check the template and ensure it's correctly configured to support assistant "
"masking."
)
output = {k: processed[k] for k in ("input_ids", "assistant_masks") if k in processed}
else:
output = {"input_ids": processing_class(text=example[dataset_text_field])["input_ids"]}
return output
dataset = dataset.map(
tokenize,
fn_kwargs={
"processing_class": processing_class,
"dataset_text_field": args.dataset_text_field,
"assistant_only_loss": args.assistant_only_loss,
},
**map_kwargs,
)
# Pack or truncate
if packing:
if args.max_length is None:
raise ValueError("When packing is enabled, `max_length` can't be `None`.")
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Packing {dataset_name} dataset"
columns = ["input_ids"]
if "completion_mask" in dataset.column_names:
columns.append("completion_mask")
if "assistant_masks" in dataset.column_names:
columns.append("assistant_masks")
dataset = dataset.select_columns(columns)
# Packing adds new column "seq_lengths" needed for document aware FlashAttention
dataset = pack_dataset(dataset, args.max_length, args.packing_strategy, map_kwargs)
elif args.max_length is not None:
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Truncating {dataset_name} dataset"
dataset = truncate_dataset(dataset, args.max_length, map_kwargs)
# For Liger kernel, ensure only the essential columns
if args.use_liger_kernel:
collator_expected_keys = {"input_ids", "seq_lengths", "completion_mask", "assistant_masks"}
dataset = dataset.select_columns(collator_expected_keys.intersection(dataset.column_names))
return dataset
def _set_signature_columns_if_needed(self):
# If `self.args.remove_unused_columns` is True, non-signature columns are removed.
# By default, this method sets `self._signature_columns` to the model's expected inputs (usually, "input_ids"
# and "attention_mask"). When using `train_on_completion_only` we add a "completion_mask" column to the
# dataset. So we need to override the default signature columns to include "completion_mask" as well.
if self._signature_columns is None:
if self._is_vlm:
self._signature_columns = ["messages", "prompt", "completion", "images"]
else:
self._signature_columns = ["input_ids", "labels", "seq_lengths", "completion_mask", "assistant_masks"]
    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """
        Compute the training loss via the parent ``Trainer`` and additionally record
        token-level entropy, token counts (train only), and mean token accuracy in
        ``self._metrics`` for the current mode ("train" or "eval").
        Args:
            model: The model being trained (forwarded to ``super().compute_loss``).
            inputs: Batch dict; must contain either "attention_mask" or
                "position_ids" (padding-free case), and optionally "labels".
            return_outputs: If True, return ``(loss, outputs)`` instead of just ``loss``.
            num_items_in_batch: Forwarded to ``super().compute_loss`` for loss normalization.
        Returns:
            The loss tensor, or ``(loss, outputs)`` when ``return_outputs`` is True.
        Raises:
            ValueError: If neither "attention_mask" nor "position_ids" is in ``inputs``.
        """
        mode = "train" if self.model.training else "eval"
        # If not set, defaults from model config and may warn since cache isn't compatible with gradient checkpointing
        inputs["use_cache"] = False
        # Always request outputs here (regardless of `return_outputs`) so the logits
        # are available for the metric computations below.
        (loss, outputs) = super().compute_loss(
            model, inputs, return_outputs=True, num_items_in_batch=num_items_in_batch
        )
        # Compute entropy (metrics only — no gradients needed)
        with torch.no_grad():
            per_token_entropy = entropy_from_logits(outputs.logits)
            if "attention_mask" in inputs:
                attention_mask = inputs["attention_mask"]
                # When using Prompt Tuning, we need to add attention for the virtual tokens (all set to 1).
                virtual_attention_mask = torch.ones(
                    attention_mask.size(0), self.num_virtual_tokens, device=attention_mask.device
                )
                attention_mask = torch.cat((virtual_attention_mask, attention_mask), dim=1)
                # Mask-weighted mean entropy over attended positions.
                entropy = torch.sum(per_token_entropy * attention_mask) / attention_mask.sum()
            elif "position_ids" in inputs:
                # Padding-free batching: every position is a real token, so a plain mean suffices.
                entropy = torch.mean(per_token_entropy)
            else:
                raise ValueError("Expected 'attention_mask' or 'position_ids' in inputs.")
            entropy = self.accelerator.gather_for_metrics(entropy).mean().item()
            self._metrics[mode]["entropy"].append(entropy)
        if mode == "train":
            # When using padding-free, the attention_mask is not present in the inputs, instead we have cu_seq_lens_q,
            # cu_seq_lens_k, and max_length_k, max_length_q and position_ids.
            if "attention_mask" in inputs:
                num_tokens_in_batch = self.accelerator.gather_for_metrics(inputs["attention_mask"].sum()).sum().item()
            elif "position_ids" in inputs:
                # Sequence-dimension length == number of (unpadded) tokens in this local batch.
                local_num_tokens = torch.tensor(inputs["position_ids"].size(1), device=inputs["position_ids"].device)
                num_tokens_in_batch = self.accelerator.gather_for_metrics(local_num_tokens).sum().item()
            else:
                raise ValueError("Expected 'attention_mask' or 'position_ids' in inputs.")
            self._total_train_tokens += num_tokens_in_batch
            # Store as a one-element list: `log` averages each metric list, and the
            # running total should be reported as-is, not accumulated per step.
            self._metrics[mode]["num_tokens"] = [self._total_train_tokens]
        # Compute token accuracy if we have labels and if the model is not using Liger (no logits)
        if "labels" in inputs and not self.args.use_liger_kernel:
            with torch.no_grad():
                # Shift so that position i's logits predict label i+1 (causal LM convention).
                shift_logits = outputs.logits[..., :-1, :].contiguous()
                shift_labels = inputs["labels"][..., 1:].contiguous()
                # When using Prompt Tuning, skip the virtual tokens in logits before accuracy computation, since they do
                # not correspond to actual input labels.
                shift_logits = shift_logits[:, self.num_virtual_tokens :, :]
                # Get predictions
                predictions = shift_logits.argmax(dim=-1)
                # Create mask for non-padding tokens (assuming ignore_index is -100)
                mask = shift_labels != -100
                # Calculate accuracy only on non-padding tokens
                correct_predictions = (predictions == shift_labels) & mask
                total_tokens = mask.sum()
                correct_tokens = correct_predictions.sum()
                # Gather the correct_tokens and total_tokens across all processes
                correct_tokens = self.accelerator.gather_for_metrics(correct_tokens)
                total_tokens = self.accelerator.gather_for_metrics(total_tokens)
                # Compute the mean token accuracy and log it
                total_sum = total_tokens.sum()
                accuracy = (correct_tokens.sum() / total_sum).item() if total_sum > 0 else 0.0
                self._metrics[mode]["mean_token_accuracy"].append(accuracy)
        return (loss, outputs) if return_outputs else loss
# Override training step to add activation offloading context.
def training_step(self, *args, **kwargs):
with self.maybe_activation_offload_context:
return super().training_step(*args, **kwargs)
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
mode = "train" if self.model.training else "eval"
metrics = {key: sum(val) / len(val) for key, val in self._metrics[mode].items()} # average the metrics
# This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
# start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` to match the format.
if mode == "eval":
metrics = {f"eval_{key}": val for key, val in metrics.items()}
logs.update(metrics)
super().log(logs, start_time)
self._metrics[mode].clear()
# Ensure the model card is saved along with the checkpoint
def _save_checkpoint(self, model, trial):
if self.args.hub_model_id is None:
model_name = Path(self.args.output_dir).name
else:
model_name = self.args.hub_model_id.split("/")[-1]
self.create_model_card(model_name=model_name)
super()._save_checkpoint(model, trial)
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
# normalize `tags` to a mutable set
if tags is None:
tags = set()
elif isinstance(tags, str):
tags = {tags}
else:
tags = set(tags)
if hasattr(self.model.config, "unsloth_version"):
tags.add("unsloth")
tags.update(self._tag_names)
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=list(tags),
wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="SFT",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/sft_trainer.py/0 | {
"file_path": "trl/trl/trainer/sft_trainer.py",
"repo_id": "trl",
"token_count": 28011
} | 605 |
- title: Unit 0. Welcome to the course
sections:
- local: unit0/introduction
title: Welcome to the course 🤗
- local: unit0/onboarding
title: Onboarding
- local: unit0/discord101
title: (Optional) Discord 101
- title: Live 1. How the course works and Q&A
sections:
- local: communication/live1
title: Live 1. How the course works and Q&A
- title: Unit 1. Introduction to Agents
sections:
- local: unit1/introduction
title: Introduction
- local: unit1/what-are-agents
title: What is an Agent?
- local: unit1/quiz1
title: Quick Quiz 1
- local: unit1/what-are-llms
title: What are LLMs?
- local: unit1/messages-and-special-tokens
title: Messages and Special Tokens
- local: unit1/tools
title: What are Tools?
- local: unit1/quiz2
title: Quick Quiz 2
- local: unit1/agent-steps-and-structure
title: Understanding AI Agents through the Thought-Action-Observation Cycle
- local: unit1/thoughts
title: Thought, Internal Reasoning and the Re-Act Approach
- local: unit1/actions
title: Actions, Enabling the Agent to Engage with Its Environment
- local: unit1/observations
title: Observe, Integrating Feedback to Reflect and Adapt
- local: unit1/dummy-agent-library
title: Dummy Agent Library
- local: unit1/tutorial
title: Let’s Create Our First Agent Using smolagents
- local: unit1/final-quiz
title: Unit 1 Final Quiz
- local: unit1/conclusion
title: Conclusion
- title: Unit 2. Frameworks for AI Agents
sections:
- local: unit2/introduction
title: Frameworks for AI Agents
- title: Unit 2.1 The smolagents framework
sections:
- local: unit2/smolagents/introduction
title: Introduction to smolagents
- local: unit2/smolagents/why_use_smolagents
title: Why use smolagents?
- local: unit2/smolagents/quiz1
title: Quick Quiz 1
- local: unit2/smolagents/code_agents
title: Building Agents That Use Code
- local: unit2/smolagents/tool_calling_agents
title: Writing actions as code snippets or JSON blobs
- local: unit2/smolagents/tools
title: Tools
- local: unit2/smolagents/retrieval_agents
title: Retrieval Agents
- local: unit2/smolagents/quiz2
title: Quick Quiz 2
- local: unit2/smolagents/multi_agent_systems
title: Multi-Agent Systems
- local: unit2/smolagents/vision_agents
title: Vision and Browser agents
- local: unit2/smolagents/final_quiz
title: Final Quiz
- local: unit2/smolagents/conclusion
title: Conclusion
- title: Unit 2.2 The LlamaIndex framework
sections:
- local: unit2/llama-index/introduction
    title: Introduction to LlamaIndex
- local: unit2/llama-index/llama-hub
title: Introduction to LlamaHub
- local: unit2/llama-index/components
title: What are Components in LlamaIndex?
- local: unit2/llama-index/tools
title: Using Tools in LlamaIndex
- local: unit2/llama-index/quiz1
title: Quick Quiz 1
- local: unit2/llama-index/agents
title: Using Agents in LlamaIndex
- local: unit2/llama-index/workflows
title: Creating Agentic Workflows in LlamaIndex
- local: unit2/llama-index/quiz2
title: Quick Quiz 2
- local: unit2/llama-index/conclusion
title: Conclusion
- title: Unit 2.3 The LangGraph framework
sections:
- local: unit2/langgraph/introduction
title: Introduction to LangGraph
- local: unit2/langgraph/when_to_use_langgraph
title: What is LangGraph?
- local: unit2/langgraph/building_blocks
title: Building Blocks of LangGraph
- local: unit2/langgraph/first_graph
title: Building Your First LangGraph
- local: unit2/langgraph/document_analysis_agent
title: Document Analysis Graph
- local: unit2/langgraph/quiz1
title: Quick Quiz 1
- local: unit2/langgraph/conclusion
title: Conclusion
- title: Unit 3. Use Case for Agentic RAG
sections:
- local: unit3/agentic-rag/introduction
title: Introduction to Use Case for Agentic RAG
- local: unit3/agentic-rag/agentic-rag
title: Agentic Retrieval Augmented Generation (RAG)
- local: unit3/agentic-rag/invitees
title: Creating a RAG Tool for Guest Stories
- local: unit3/agentic-rag/tools
title: Building and Integrating Tools for Your Agent
- local: unit3/agentic-rag/agent
title: Creating Your Gala Agent
- local: unit3/agentic-rag/conclusion
title: Conclusion
- title: Unit 4. Final Project - Create, Test, and Certify Your Agent
sections:
- local: unit4/introduction
title: Introduction to the Final Unit
- local: unit4/what-is-gaia
title: What is GAIA?
- local: unit4/hands-on
title: The Final Hands-On
- local: unit4/get-your-certificate
title: Get Your Certificate Of Excellence
- local: unit4/conclusion
title: Conclusion of the Course
- local: unit4/additional-readings
title: What Should You Learn Now?
- title: Bonus Unit 1. Fine-tuning an LLM for Function-calling
sections:
- local: bonus-unit1/introduction
title: Introduction
- local: bonus-unit1/what-is-function-calling
title: What is Function Calling?
- local: bonus-unit1/fine-tuning
title: Let's Fine-Tune your model for Function-calling
- local: bonus-unit1/conclusion
title: Conclusion
- title: Bonus Unit 2. Agent Observability and Evaluation
sections:
- local: bonus-unit2/introduction
title: Introduction
- local: bonus-unit2/what-is-agent-observability-and-evaluation
title: What is agent observability and evaluation?
- local: bonus-unit2/monitoring-and-evaluating-agents-notebook
title: Monitoring and evaluating agents
- local: bonus-unit2/quiz
title: Quiz
- title: Bonus Unit 3. Agents in Games with Pokemon
sections:
- local: bonus-unit3/introduction
title: Introduction
- local: bonus-unit3/state-of-art
title: The State of the Art in Using LLMs in Games
- local: bonus-unit3/from-llm-to-agents
title: From LLMs to AI Agents
- local: bonus-unit3/building_your_pokemon_agent
title: Build Your Own Pokémon Battle Agent
- local: bonus-unit3/launching_agent_battle
title: Launching Your Pokémon Battle Agent
- local: bonus-unit3/conclusion
title: Conclusion
| agents-course/units/en/_toctree.yml/0 | {
"file_path": "agents-course/units/en/_toctree.yml",
"repo_id": "agents-course",
"token_count": 2077
} | 0 |
# (Optional) Discord 101 [[discord-101]]
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/discord-etiquette.jpg" alt="The Discord Etiquette" width="100%"/>
This guide is designed to help you get started with Discord, a free chat platform popular in the gaming and ML communities.
Join the Hugging Face Community Discord server, which **has over 100,000 members**, by clicking <a href="https://discord.gg/UrrTSsSyjb" target="_blank">here</a>. It's a great place to connect with others!
## The Agents course on Hugging Face's Discord Community
Starting on Discord can be a bit overwhelming, so here's a quick guide to help you navigate.
<!-- Not the case anymore, you'll be prompted to choose your interests. Be sure to select **"AI Agents"** to gain access to the AI Agents Category, which includes all the course-related channels. Feel free to explore and join additional channels if you wish! 🚀-->
The HF Community Server hosts a vibrant community with interests in various areas, offering opportunities for learning through paper discussions, events, and more.
After [signing up](http://hf.co/join/discord), introduce yourself in the `#introduce-yourself` channel.
We created 4 channels for the Agents Course:
- `agents-course-announcements`: for the **latest course information**.
- `🎓-agents-course-general`: for **general discussions and chitchat**.
- `agents-course-questions`: to **ask questions and help your classmates**.
- `agents-course-showcase`: to **show your best agents**.
In addition you can check:
- `smolagents`: for **discussion and support with the library**.
## Tips for using Discord effectively
### How to join a server
If you are less familiar with Discord, you might want to check out this <a href="https://support.discord.com/hc/en-us/articles/360034842871-How-do-I-join-a-Server#h_01FSJF9GT2QJMS2PRAW36WNBS8" target="_blank">guide</a> on how to join a server.
Here's a quick summary of the steps:
1. Click on the <a href="https://discord.gg/UrrTSsSyjb" target="_blank">Invite Link</a>.
2. Sign in with your Discord account, or create an account if you don't have one.
3. Validate that you are not an AI agent!
4. Set up your nickname and avatar.
5. Click "Join Server".
### How to use Discord effectively
Here are a few tips for using Discord effectively:
- **Voice channels** are available, though text chat is more commonly used.
- You can format text using **markdown style**, which is especially useful for writing code. Note that markdown doesn't work as well for links.
- Consider opening threads for **long conversations** to keep discussions organized.
We hope you find this guide helpful! If you have any questions, feel free to ask us on Discord 🤗.
| agents-course/units/en/unit0/discord101.mdx/0 | {
"file_path": "agents-course/units/en/unit0/discord101.mdx",
"repo_id": "agents-course",
"token_count": 764
} | 1 |
# Let's Create Our First Agent Using smolagents
In the last section, we learned how we can create Agents from scratch using Python code, and we **saw just how tedious that process can be**. Fortunately, many Agent libraries simplify this work by **handling much of the heavy lifting for you**.
In this tutorial, **you'll create your very first Agent** capable of performing actions such as image generation, web search, time zone checking and much more!
You will also publish your agent **on a Hugging Face Space so you can share it with friends and colleagues**.
Let's get started!
## What is smolagents?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/smolagents.png" alt="smolagents"/>
To make this Agent, we're going to use `smolagents`, a library that **provides a framework for developing your agents with ease**.
This lightweight library is designed for simplicity, but it abstracts away much of the complexity of building an Agent, allowing you to focus on designing your agent's behavior.
We're going to get deeper into smolagents in the next Unit. Meanwhile, you can also check this <a href="https://huggingface.co/blog/smolagents" target="_blank">blog post</a> or the library's <a href="https://github.com/huggingface/smolagents" target="_blank">repo in GitHub</a>.
In short, `smolagents` is a library that focuses on the **CodeAgent**, a kind of agent that performs **"Actions"** through code blocks, and then **"Observes"** results by executing the code.
Here is an example of what we'll build!
We provided our agent with an **Image generation tool** and asked it to generate an image of a cat.
The agent inside `smolagents` is going to have the **same behaviors as the custom one we built previously**: it's going **to think, act and observe in cycle** until it reaches a final answer:
<iframe width="560" height="315" src="https://www.youtube.com/embed/PQDKcWiuln4?si=ysSTDZoi8y55FVvA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
Exciting, right?
## Let's build our Agent!
To start, duplicate this Space: <a href="https://huggingface.co/spaces/agents-course/First_agent_template" target="_blank">https://huggingface.co/spaces/agents-course/First_agent_template</a>
> Thanks to <a href="https://huggingface.co/m-ric" target="_blank">Aymeric</a> for this template! 🙌
Duplicating this space means **creating a local copy on your own profile**:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/duplicate-space.gif" alt="Duplicate"/>
After duplicating the Space, you'll need to add your Hugging Face API token so your agent can access the model API:
1. First, get your Hugging Face token from [https://hf.co/settings/tokens](https://hf.co/settings/tokens) with permission for inference, if you don't already have one
2. Go to your duplicated Space and click on the **Settings** tab
3. Scroll down to the **Variables and Secrets** section and click **New Secret**
4. Create a secret with the name `HF_TOKEN` and paste your token as the value
5. Click **Save** to store your token securely
Throughout this lesson, the only file you will need to modify is the (currently incomplete) **"app.py"**. You can see here the [original one in the template](https://huggingface.co/spaces/agents-course/First_agent_template/blob/main/app.py). To find yours, go to your copy of the space, then click the `Files` tab and then on `app.py` in the directory listing.
Let's break down the code together:
- The file begins with some simple but necessary library imports
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
```
As outlined earlier, we will directly use the **CodeAgent** class from **smolagents**.
### The Tools
Now let's get into the tools! If you want a refresher about tools, don't hesitate to go back to the [Tools](tools) section of the course.
```python
@tool
def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type
# Keep this format for the tool description / args description but feel free to modify the tool
"""A tool that does nothing yet
Args:
arg1: the first argument
arg2: the second argument
"""
return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""A tool that fetches the current local time in a specified timezone.
Args:
timezone: A string representing a valid timezone (e.g., 'America/New_York').
"""
try:
# Create timezone object
tz = pytz.timezone(timezone)
# Get current time in that timezone
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"The current local time in {timezone} is: {local_time}"
except Exception as e:
return f"Error fetching time for timezone '{timezone}': {str(e)}"
```
The Tools are what we are encouraging you to build in this section! We give you two examples:
1. A **non-working dummy Tool** that you can modify to make something useful.
2. An **actually working Tool** that gets the current time somewhere in the world.
To define your tool it is important to:
1. Provide input and output types for your function, like in `get_current_time_in_timezone(timezone: str) -> str:`
2. **A well formatted docstring**. `smolagents` is expecting all the arguments to have a **textual description in the docstring**.
### The Agent
It uses [`Qwen/Qwen2.5-Coder-32B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) as the LLM engine. This is a very capable model that we'll access via the serverless API.
```python
final_answer = FinalAnswerTool()
model = InferenceClientModel(
max_tokens=2096,
temperature=0.5,
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
custom_role_conversions=None,
)
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
# We're creating our CodeAgent
agent = CodeAgent(
model=model,
tools=[final_answer], # add your tools here (don't remove final_answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
GradioUI(agent).launch()
```
This Agent still uses the `InferenceClient` we saw in an earlier section behind the **InferenceClientModel** class!
We will give more in-depth examples when we present the framework in Unit 2. For now, you need to focus on **adding new tools to the list of tools** using the `tools` parameter of your Agent.
For example, you could use the `DuckDuckGoSearchTool` that was imported in the first line of the code, or you can examine the `image_generation_tool` that is loaded from the Hub later in the code.
**Adding tools will give your agent new capabilities**, try to be creative here!
### The System Prompt
The agent's system prompt is stored in a separate `prompts.yaml` file. This file contains predefined instructions that guide the agent's behavior.
Storing prompts in a YAML file allows for easy customization and reuse across different agents or use cases.
You can check the [Space's file structure](https://huggingface.co/spaces/agents-course/First_agent_template/tree/main) to see where the `prompts.yaml` file is located and how it's organized within the project.
The complete "app.py":
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type
# Keep this format for the tool description / args description but feel free to modify the tool
"""A tool that does nothing yet
Args:
arg1: the first argument
arg2: the second argument
"""
return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""A tool that fetches the current local time in a specified timezone.
Args:
timezone: A string representing a valid timezone (e.g., 'America/New_York').
"""
try:
# Create timezone object
tz = pytz.timezone(timezone)
# Get current time in that timezone
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"The current local time in {timezone} is: {local_time}"
except Exception as e:
return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
model = InferenceClientModel(
max_tokens=2096,
temperature=0.5,
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
custom_role_conversions=None,
)
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
# Load system prompt from prompt.yaml file
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
model=model,
tools=[final_answer], # add your tools here (don't remove final_answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates # Pass system prompt to CodeAgent
)
GradioUI(agent).launch()
```
Your **Goal** is to get familiar with the Space and the Agent.
Currently, the agent in the template **does not use any tools, so try to provide it with some of the pre-made ones or even make some new tools yourself!**
We are eagerly waiting for your amazing agents output in the discord channel **#agents-course-showcase**!
---
Congratulations, you've built your first Agent! Don't hesitate to share it with your friends and colleagues.
Since this is your first try, it's perfectly normal if it's a little buggy or slow. In future units, we'll learn how to build even better Agents.
The best way to learn is to try, so don't hesitate to update it, add more tools, try with another model, etc.
In the next section, you're going to fill the final Quiz and get your certificate!
| agents-course/units/en/unit1/tutorial.mdx/0 | {
"file_path": "agents-course/units/en/unit1/tutorial.mdx",
"repo_id": "agents-course",
"token_count": 3262
} | 2 |
# Introduction to the LlamaHub
**LlamaHub is a registry of hundreds of integrations, agents and tools that you can use within LlamaIndex.**

We will be using various integrations in this course, so let's first look at the LlamaHub and how it can help us.
Let's see how to find and install the dependencies for the components we need.
## Installation
LlamaIndex installation instructions are available as a well-structured **overview on [LlamaHub](https://llamahub.ai/)**.
This might be a bit overwhelming at first, but most of the **installation commands generally follow an easy-to-remember format**:
```bash
pip install llama-index-{component-type}-{framework-name}
```
Let's try to install the dependencies for an LLM and embedding component using the [Hugging Face inference API integration](https://llamahub.ai/l/llms/llama-index-llms-huggingface-api?from=llms).
```bash
pip install llama-index-llms-huggingface-api llama-index-embeddings-huggingface
```
## Usage
Once installed, we can see the usage patterns. You'll notice that the import paths follow the install command!
Underneath, we can see an example of the usage of **the Hugging Face inference API for an LLM component**.
```python
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
import os
from dotenv import load_dotenv
# Load the .env file
load_dotenv()
# Retrieve HF_TOKEN from the environment variables
hf_token = os.getenv("HF_TOKEN")
llm = HuggingFaceInferenceAPI(
model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
temperature=0.7,
max_tokens=100,
token=hf_token,
provider="auto"
)
response = llm.complete("Hello, how are you?")
print(response)
# I am good, how can I help you today?
```
Wonderful, we now know how to find, install and use the integrations for the components we need.
**Let's dive deeper into the components** and see how we can use them to build our own agents.
| agents-course/units/en/unit2/llama-index/llama-hub.mdx/0 | {
"file_path": "agents-course/units/en/unit2/llama-index/llama-hub.mdx",
"repo_id": "agents-course",
"token_count": 637
} | 3 |

# Why use smolagents
In this module, we will explore the pros and cons of using [smolagents](https://huggingface.co/docs/smolagents/en/index), helping you make an informed decision about whether it's the right framework for your needs.
## What is `smolagents`?
`smolagents` is a simple yet powerful framework for building AI agents. It provides LLMs with the _agency_ to interact with the real world, such as searching or generating images.
As we learned in unit 1, AI agents are programs that use LLMs to generate **'thoughts'** based on **'observations'** to perform **'actions'**. Let's explore how this is implemented in smolagents.
### Key Advantages of `smolagents`
- **Simplicity:** Minimal code complexity and abstractions, to make the framework easy to understand, adopt and extend
- **Flexible LLM Support:** Works with any LLM through integration with Hugging Face tools and external APIs
- **Code-First Approach:** First-class support for Code Agents that write their actions directly in code, removing the need for parsing and simplifying tool calling
- **HF Hub Integration:** Seamless integration with the Hugging Face Hub, allowing the use of Gradio Spaces as tools
### When to use smolagents?
With these advantages in mind, when should we use smolagents over other frameworks?
smolagents is ideal when:
- You need a **lightweight and minimal solution.**
- You want to **experiment quickly** without complex configurations.
- Your **application logic is straightforward.**
### Code vs. JSON Actions
Unlike other frameworks where agents write actions in JSON, `smolagents` **focuses on tool calls in code**, simplifying the execution process. This is because there's no need to parse the JSON in order to build code that calls the tools: the output can be executed directly.
The following diagram illustrates this difference:

To review the difference between Code vs JSON Actions, you can revisit [the Actions Section in Unit 1](https://huggingface.co/learn/agents-course/unit1/actions#actions-enabling-the-agent-to-engage-with-its-environment).
### Agent Types in `smolagents`
Agents in `smolagents` operate as **multi-step agents**.
Each [`MultiStepAgent`](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.MultiStepAgent) performs:
- One thought
- One tool call and execution
In addition to using **[CodeAgent](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.CodeAgent)** as the primary type of agent, smolagents also supports **[ToolCallingAgent](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.ToolCallingAgent)**, which writes tool calls in JSON.
We will explore each agent type in more detail in the following sections.
<Tip>
In smolagents, tools are defined using the <code>@tool</code> decorator wrapping a Python function, or using the <code>Tool</code> class.
</Tip>
### Model Integration in `smolagents`
`smolagents` supports flexible LLM integration, allowing you to use any callable model that meets [certain criteria](https://huggingface.co/docs/smolagents/main/en/reference/models). The framework provides several predefined classes to simplify model connections:
- **[TransformersModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.TransformersModel):** Implements a local `transformers` pipeline for seamless integration.
- **[InferenceClientModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.InferenceClientModel):** Supports [serverless inference](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference) calls through [Hugging Face's infrastructure](https://huggingface.co/docs/api-inference/index), or via a growing number of [third-party inference providers](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference#supported-providers-and-tasks).
- **[LiteLLMModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.LiteLLMModel):** Leverages [LiteLLM](https://www.litellm.ai/) for lightweight model interactions.
- **[OpenAIServerModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.OpenAIServerModel):** Connects to any service that offers an OpenAI API interface.
- **[AzureOpenAIServerModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.AzureOpenAIServerModel):** Supports integration with any Azure OpenAI deployment.
This flexibility ensures that developers can choose the model and service most suitable for their specific use cases, and allows for easy experimentation.
Now that we understand why and when to use smolagents, let's dive deeper into this powerful library!
## Resources
- [smolagents Blog](https://huggingface.co/blog/smolagents) - Introduction to smolagents and code interactions
| agents-course/units/en/unit2/smolagents/why_use_smolagents.mdx/0 | {
"file_path": "agents-course/units/en/unit2/smolagents/why_use_smolagents.mdx",
"repo_id": "agents-course",
"token_count": 1410
} | 4 |
# Hagamos Fine-Tuning de Tu Modelo para Llamadas a Funciones
Ahora estamos listos para hacer fine-tuning de nuestro primer modelo para llamadas a funciones 🔥.
## ¿Cómo entrenamos nuestro modelo para llamadas a funciones?
> Respuesta: Necesitamos **datos**
Un proceso de entrenamiento de modelo se puede dividir en 3 pasos:
1. **El modelo es pre-entrenado con una gran cantidad de datos**. El resultado de ese paso es un **modelo pre-entrenado**. Por ejemplo, [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). Es un modelo base y solo sabe cómo **predecir el siguiente token sin fuertes capacidades de seguimiento de instrucciones**.
2. Para ser útil en un contexto de chat, el modelo luego necesita ser **ajustado (fine-tuned)** para seguir instrucciones. En este paso, puede ser entrenado por los creadores del modelo, la comunidad de código abierto, tú o cualquier persona. Por ejemplo, [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) es un modelo ajustado para instrucciones por el equipo de Google detrás del proyecto Gemma.
3. El modelo puede luego **alinearse** con las preferencias del creador. Por ejemplo, un modelo de chat de servicio al cliente que nunca debe ser grosero con los clientes.
Usualmente un producto completo como Gemini o Mistral **pasará por los 3 pasos**, mientras que los modelos que puedes encontrar en Hugging Face han completado uno o más pasos de este entrenamiento.
En este tutorial, construiremos un modelo de llamadas a funciones basado en [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). Elegimos el modelo ajustado [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) en lugar del modelo base [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b) porque el modelo ajustado ha sido mejorado para nuestro caso de uso.
Comenzar desde el modelo pre-entrenado **requeriría más entrenamiento para aprender a seguir instrucciones, chatear Y hacer llamadas a funciones**.
Al comenzar desde el modelo ajustado para instrucciones, **minimizamos la cantidad de información que nuestro modelo necesita aprender**.
## LoRA (Adaptación de Bajo Rango de Modelos de Lenguaje Grandes)
LoRA es una técnica de entrenamiento popular y ligera que **reduce significativamente el número de parámetros a entrenar**.
Funciona **insertando un número menor de nuevos pesos(weights) como un adaptador en el modelo para entrenar**. Esto hace que el entrenamiento con LoRA sea mucho más rápido, eficiente en memoria y produzca pesos(weights) de modelo más pequeños (unos cientos de MB), que son más fáciles de almacenar y compartir.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/blog_multi-lora-serving_LoRA.gif" alt="Inferencia LoRA" width="50%"/>
LoRA funciona añadiendo pares de matrices de descomposición de rango a las capas del Transformer, típicamente centrándose en las capas lineales. Durante el entrenamiento, "congelaremos" el resto del modelo y solo actualizaremos los pesos de esos adaptadores recién añadidos.
Al hacerlo, el número de **parámetros** que necesitamos entrenar disminuye considerablemente ya que solo necesitamos actualizar los pesos del adaptador.
Durante la inferencia, la entrada se pasa al adaptador y al modelo base, o estos pesos del adaptador pueden fusionarse con el modelo base, lo que resulta en ninguna sobrecarga adicional de latencia.
LoRA es particularmente útil para adaptar modelos de lenguaje **grandes** a tareas o dominios específicos mientras se mantienen manejables los requisitos de recursos. Esto ayuda a reducir la memoria **requerida** para entrenar un modelo.
Si quieres aprender más sobre cómo funciona LoRA, deberías consultar este [tutorial](https://huggingface.co/learn/nlp-course/chapter11/4?fw=pt).
## Fine-Tuning de un Modelo para Llamadas a Funciones
Puedes acceder al notebook del tutorial 👉 [aquí](https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb).
Luego, haz clic en [](https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb) para poder ejecutarlo en un Notebook de Colab.
| agents-course/units/es/bonus-unit1/fine-tuning.mdx/0 | {
"file_path": "agents-course/units/es/bonus-unit1/fine-tuning.mdx",
"repo_id": "agents-course",
"token_count": 1585
} | 5 |
# Incorporación: Tus Primeros Pasos ⛵
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Hora de Incorporarse" width="100%"/>
Ahora que tienes todos los detalles, ¡comencemos! Vamos a hacer cuatro cosas:
1. **Crear tu Cuenta de Hugging Face** si aún no lo has hecho
2. **Registrarte en Discord y presentarte** (no seas tímido/a 🤗)
3. **Seguir el Curso de Agentes de Hugging Face** en el Hub
4. **Difundir** el curso
### Paso 1: Crea tu Cuenta de Hugging Face
(Si aún no lo has hecho) crea una cuenta de Hugging Face <a href='https://huggingface.co/join' target='_blank'>aquí</a>.
### Paso 2: Únete a Nuestra Comunidad de Discord
👉🏻 Únete a nuestro servidor de discord <a href="https://discord.gg/UrrTSsSyjb" target="_blank">aquí.</a>
Cuando te unas, recuerda presentarte en `#introduce-yourself`.
Tenemos múltiples canales relacionados con Agentes IA:
- `agents-course-announcements`: para la **información más reciente del curso**.
- `🎓-agents-course-general`: para **discusiones generales y charlas**.
- `agents-course-questions`: para **hacer preguntas y ayudar a tus compañeros**.
- `agents-course-showcase`: para **mostrar tus mejores agentes**.
Además, puedes consultar:
- `smolagents`: para **discusión y soporte con la biblioteca**.
Si es tu primera vez usando Discord, escribimos una guía Discord 101 para conocer las mejores prácticas. Consulta [la siguiente sección](discord101.mdx).
### Paso 3: Sigue la Organización del Curso de Agentes de Hugging Face
Mantente al día con los materiales más recientes del curso, actualizaciones y anuncios **siguiendo la Organización del Curso de Agentes de Hugging Face**.
👉 Ve <a href="https://huggingface.co/agents-course" target="_blank">aquí</a> y haz clic en **seguir**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/hf_course_follow.gif" alt="Seguir" width="100%"/>
### Paso 4: Difunde el curso
¡Ayúdanos a hacer este curso más visible! Hay dos formas en que puedes ayudarnos:
1. Muestra tu apoyo con una ⭐ en <a href="https://github.com/huggingface/agents-course" target="_blank">el repositorio del curso</a>.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/please_star.gif" alt="Estrella en el repositorio"/>
2. Comparte tu Camino de Aprendizaje: ¡Haz que otros **sepan que estás tomando este curso**! Hemos preparado una ilustración que puedes usar en tus publicaciones de redes sociales
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png">
Puedes descargar la imagen haciendo clic 👉 [aquí](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png?download=true)
¡Felicidades! 🎉 **¡Has completado el proceso de incorporación**! Ahora estás listo/a para comenzar a aprender sobre Agentes IA. ¡Diviértete!
Sigue aprendiendo, mantente genial 🤗
| agents-course/units/es/unit0/onboarding.mdx/0 | {
"file_path": "agents-course/units/es/unit0/onboarding.mdx",
"repo_id": "agents-course",
"token_count": 1131
} | 6 |
# ¿Qué son los LLMs?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-1.jpg" alt="Planificación de la Unidad 1"/>
En la sección anterior aprendimos que cada Agente necesita **un Modelo de IA en su núcleo**, y que los LLMs son el tipo más común de modelos de IA para este propósito.
Ahora aprenderemos qué son los LLMs y cómo impulsan a los Agentes.
Esta sección ofrece una explicación técnica concisa del uso de los LLMs. Si quieres profundizar más, puedes consultar nuestro <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">Curso gratuito de Procesamiento de Lenguaje Natural</a>.
## ¿Qué es un Modelo de Lenguaje Grande?
Un LLM es un tipo de modelo de IA que sobresale en **entender y generar lenguaje humano**. Son entrenados con vastas cantidades de datos textuales, lo que les permite aprender patrones, estructura e incluso matices en el lenguaje. Estos modelos típicamente constan de muchos millones de parámetros.
La mayoría de los LLMs actualmente están **construidos sobre la arquitectura Transformer**—una arquitectura de aprendizaje profundo basada en el algoritmo de "Atención", que ha ganado un interés significativo desde el lanzamiento de BERT de Google en 2018.
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/transformer.jpg" alt="Transformer"/>
<figcaption>La arquitectura original del Transformer se veía así, con un codificador a la izquierda y un decodificador a la derecha.
</figcaption>
</figure>
Hay 3 tipos de transformers:
1. **Codificadores (Encoders)**
Un Transformer basado en codificador toma texto (u otros datos) como entrada y produce una representación densa (o embedding) de ese texto.
- **Ejemplo**: BERT de Google
- **Casos de uso**: Clasificación de texto, búsqueda semántica, Reconocimiento de Entidades Nombradas
- **Tamaño típico**: Millones de parámetros
2. **Decodificadores (Decoders)**
Un Transformer basado en decodificador se enfoca **en generar nuevos tokens para completar una secuencia, un token a la vez**.
- **Ejemplo**: Llama de Meta
- **Casos de uso**: Generación de texto, chatbots, generación de código
- **Tamaño típico**: Miles de millones de parámetros
3. **Seq2Seq (Codificador–Decodificador)**
Un Transformer de secuencia a secuencia _combina_ un codificador y un decodificador. El codificador primero procesa la secuencia de entrada en una representación contextual, luego el decodificador genera una secuencia de salida.
- **Ejemplo**: T5, BART
- **Casos de uso**: Traducción, Resumen, Parafraseo
- **Tamaño típico**: Millones de parámetros
Aunque los Modelos de Lenguaje Grandes vienen en varias formas, los LLMs son típicamente modelos basados en decodificador con miles de millones de parámetros. Aquí están algunos de los LLMs más conocidos:
| **Modelo** | **Proveedor** |
|-----------------------------------|-------------------------------------------|
| **Deepseek-R1** | DeepSeek |
| **GPT4** | OpenAI |
| **Llama 3** | Meta (Facebook AI Research) |
| **SmolLM2** | Hugging Face |
| **Gemma** | Google |
| **Mistral** | Mistral |
El principio subyacente de un LLM es simple pero altamente efectivo: **su objetivo es predecir el siguiente token, dada una secuencia de tokens previos**. Un "token" es la unidad de información con la que trabaja un LLM. Puedes pensar en un "token" como si fuera una "palabra", pero por razones de eficiencia los LLMs no usan palabras completas.
Por ejemplo, mientras que el inglés tiene aproximadamente 600,000 palabras, un LLM podría tener un vocabulario de alrededor de 32,000 tokens (como es el caso de Llama 2). La tokenización a menudo funciona con unidades subléxicas que pueden combinarse.
Por ejemplo, considera cómo los tokens "interés" y "ante" pueden combinarse para formar "interesante", o "ado" puede añadirse para formar "interesado".
Puedes experimentar con diferentes tokenizadores en el siguiente playground interactivo:
<iframe
src="https://agents-course-the-tokenizer-playground.static.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
Cada LLM tiene algunos **tokens especiales** específicos del modelo. El LLM usa estos tokens para abrir y cerrar los componentes estructurados de su generación. Por ejemplo, para indicar el inicio o fin de una secuencia, mensaje o respuesta. Además, los prompts de entrada que pasamos al modelo también están estructurados con tokens especiales. El más importante de ellos es el **Token de Fin de secuencia** (EOS).
Las formas de los tokens especiales son muy diversas entre los proveedores de modelos.
La siguiente tabla ilustra la diversidad de tokens especiales.
<table>
<thead>
<tr>
<th><strong>Modelo</strong></th>
<th><strong>Proveedor</strong></th>
<th><strong>Token EOS</strong></th>
<th><strong>Funcionalidad</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>GPT4</strong></td>
<td>OpenAI</td>
<td><code><|endoftext|></code></td>
<td>Fin del texto del mensaje</td>
</tr>
<tr>
<td><strong>Llama 3</strong></td>
<td>Meta (Facebook AI Research)</td>
<td><code><|eot_id|></code></td>
<td>Fin de secuencia</td>
</tr>
<tr>
<td><strong>Deepseek-R1</strong></td>
<td>DeepSeek</td>
<td><code><|end_of_sentence|></code></td>
<td>Fin del texto del mensaje</td>
</tr>
<tr>
<td><strong>SmolLM2</strong></td>
<td>Hugging Face</td>
<td><code><|im_end|></code></td>
<td>Fin de instrucción o mensaje</td>
</tr>
<tr>
<td><strong>Gemma</strong></td>
<td>Google</td>
<td><code><end_of_turn></code></td>
<td>Fin de turno de conversación</td>
</tr>
</tbody>
</table>
<Tip>
No esperamos que memorices estos tokens especiales, pero es importante apreciar su diversidad y el papel que desempeñan en la generación de texto de los LLMs. Si quieres saber más sobre tokens especiales, puedes consultar la configuración del modelo en su repositorio de Hub. Por ejemplo, puedes encontrar los tokens especiales del modelo SmolLM2 en su <a href="https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct/blob/main/tokenizer_config.json">tokenizer_config.json</a>.
</Tip>
## Entendiendo la predicción del siguiente token
Se dice que los LLMs son **autorregresivos**, lo que significa que **la salida de un paso se convierte en la entrada para el siguiente**. Este ciclo continúa hasta que el modelo predice que el siguiente token será el token EOS, momento en el cual el modelo puede detenerse.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AutoregressionSchema.gif" alt="GIF visual de decodificación autorregresiva" width="60%">
En otras palabras, un LLM decodificará texto hasta que alcance el EOS. Pero, ¿qué sucede durante un solo ciclo de decodificación?
Aunque el proceso completo puede ser bastante técnico para el propósito de aprender sobre agentes, aquí hay una breve descripción:
- Una vez que el texto de entrada es **tokenizado**, el modelo calcula una representación de la secuencia que captura información sobre el significado y la posición de cada token en la secuencia de entrada.
- Esta representación va al modelo, que produce puntuaciones que clasifican la probabilidad de cada token en su vocabulario de ser el siguiente en la secuencia.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/DecodingFinal.gif" alt="GIF visual de decodificación" width="60%">
Basándonos en estas puntuaciones, tenemos múltiples estrategias para seleccionar los tokens para completar la oración.
- La estrategia de decodificación más sencilla sería tomar siempre el token con la puntuación máxima.
Puedes interactuar con el proceso de decodificación tú mismo con SmolLM2 en este Space (recuerda, decodifica hasta alcanzar un token **EOS** que es **<|im_end|>** para este modelo):
<iframe
src="https://agents-course-decoding-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
- Pero hay estrategias de decodificación más avanzadas. Por ejemplo, *beam search* explora múltiples secuencias candidatas para encontrar aquella con la puntuación total máxima–incluso si algunos tokens individuales tienen puntuaciones más bajas.
<iframe
src="https://agents-course-beam-search-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
Si quieres saber más sobre decodificación, puedes echar un vistazo al [curso de NLP](https://huggingface.co/learn/nlp-course).
## La atención es todo lo que necesitas
Un aspecto clave de la arquitectura Transformer es la **Atención**. Al predecir la siguiente palabra, no todas las palabras en una oración son igualmente importantes; palabras como "Francia" y "capital" en la oración *"La capital de Francia es..."* llevan la mayor parte del significado.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AttentionSceneFinal.gif" alt="GIF visual de Atención" width="60%">
Este proceso de identificar las palabras más relevantes para predecir el siguiente token ha demostrado ser increíblemente efectivo.
Aunque el principio básico de los LLMs —predecir el siguiente token— ha permanecido consistente desde GPT-2, ha habido avances significativos en escalar redes neuronales y hacer que el mecanismo de atención funcione para secuencias cada vez más largas.
Si has interactuado con LLMs, probablemente estés familiarizado con el término *longitud de contexto*, que se refiere al número máximo de tokens que el LLM puede procesar, y el máximo _lapso de atención_ que tiene.
## Hacer prompting al LLM es importante
Considerando que el único trabajo de un LLM es predecir el siguiente token mirando cada token de entrada, y elegir qué tokens son "importantes", la redacción de tu secuencia de entrada es muy importante.
La secuencia de entrada que proporcionas a un LLM se llama _un prompt_. El diseño cuidadoso del prompt hace que sea más fácil **guiar la generación del LLM hacia la salida deseada**.
## ¿Cómo se entrenan los LLMs?
Los LLMs se entrenan en grandes conjuntos de datos de texto, donde aprenden a predecir la siguiente palabra en una secuencia a través de un objetivo de modelado de lenguaje auto-supervisado o enmascarado.
A partir de este aprendizaje no supervisado, el modelo aprende la estructura del lenguaje y **patrones subyacentes en el texto, permitiendo al modelo generalizar a datos no vistos**.
Después de este _pre-entrenamiento_ inicial, los LLMs pueden ser afinados en un objetivo de aprendizaje supervisado para realizar tareas específicas. Por ejemplo, algunos modelos están entrenados para estructuras conversacionales o uso de herramientas, mientras que otros se centran en clasificación o generación de código.
## ¿Cómo puedo usar los LLMs?
Tienes dos opciones principales:
1. **Ejecutar Localmente** (si tienes hardware suficiente).
2. **Usar una Nube/API** (por ejemplo, a través de la API de Inferencia Serverless de Hugging Face).
A lo largo de este curso, utilizaremos principalmente modelos a través de APIs en el Hugging Face Hub. Más adelante, exploraremos cómo ejecutar estos modelos localmente en tu hardware.
## ¿Cómo se utilizan los LLMs en los Agentes de IA?
Los LLMs son un componente clave de los Agentes de IA, **proporcionando la base para entender y generar lenguaje humano**.
Pueden interpretar instrucciones del usuario, mantener contexto en conversaciones, definir un plan y decidir qué herramientas usar.
Exploraremos estos pasos con más detalle en esta Unidad, pero por ahora, lo que necesitas entender es que el LLM es **el cerebro del Agente**.
---
¡Eso fue mucha información! Hemos cubierto los conceptos básicos de qué son los LLMs, cómo funcionan y su papel en impulsar agentes de IA.
Si deseas profundizar aún más en el fascinante mundo de los modelos de lenguaje y el procesamiento del lenguaje natural, no dudes en consultar nuestro <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">curso gratuito de NLP</a>.
Ahora que entendemos cómo funcionan los LLMs, es hora de ver **cómo los LLMs estructuran sus generaciones en un contexto conversacional**.
Para ejecutar <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit1/dummy_agent_library.ipynb" target="_blank">este notebook</a>, **necesitas un token de Hugging Face** que puedes obtener de <a href="https://hf.co/settings/tokens" target="_blank">https://hf.co/settings/tokens</a>.
Para más información sobre cómo ejecutar Jupyter Notebooks, consulta <a href="https://huggingface.co/docs/hub/notebooks">Jupyter Notebooks en el Hugging Face Hub</a>.
También necesitas solicitar acceso a <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct" target="_blank">los modelos Meta Llama</a>. | agents-course/units/es/unit1/what-are-llms.mdx/0 | {
"file_path": "agents-course/units/es/unit1/what-are-llms.mdx",
"repo_id": "agents-course",
"token_count": 5215
} | 7 |
# Autoevaluación Rápida (sin calificar) [[quiz2]]
¡¿Qué?! ¿Otra autoevaluación? Lo sabemos, lo sabemos... 😅 Pero esta breve autoevaluación no calificada está aquí para **ayudarte a reforzar conceptos clave que acabas de aprender**.
Esta evaluación cubre flujos de trabajo de agentes e interacciones: componentes esenciales para construir agentes de IA efectivos.
### Q1: ¿Cuál es el propósito de AgentWorkflow en LlamaIndex?
<Question
choices={[
{
text: "Ejecutar uno o más agentes con herramientas",
explain: "Sí, el AgentWorkflow es la principal forma de crear un sistema con uno o más agentes.",
correct: true
},
{
text: "Crear un agente único que pueda consultar tus datos sin memoria",
explain: "No, el AgentWorkflow es más capaz que eso, el QueryEngine es para consultas simples sobre tus datos.",
},
{
text: "Construir herramientas automáticamente para agentes",
explain: "El AgentWorkflow no construye herramientas, ese es el trabajo del desarrollador.",
},
{
text: "Administrar la memoria y el estado de los agentes",
explain: "Administrar la memoria y el estado no es el propósito principal del AgentWorkflow.",
}
]}
/>
---
### Q2: ¿Qué objeto se utiliza para mantener el estado del flujo de trabajo?
<Question
choices={[
{
text: "Estado",
explain: "Estado no es el objeto correcto para la gestión de estado del flujo de trabajo.",
},
{
text: "Contexto",
explain: "Contexto es el objeto correcto utilizado para mantener el estado del flujo de trabajo.",
correct: true
},
{
text: "WorkflowState",
explain: "WorkflowState no es el objeto correcto.",
},
{
text: "Administración",
explain: "Administración no es un objeto válido para el estado del flujo de trabajo.",
}
]}
/>
---
### Q3: ¿Qué método debe utilizarse si deseas que un agente recuerde interacciones previas?
<Question
choices={[
{
text: "run(query_str)",
explain: ".run(query_str) no mantiene el historial de conversaciones.",
},
{
text: "chat(query_str, ctx=ctx)",
explain: "chat() no es un método válido en workflows.",
},
{
text: "interact(query_str)",
explain: "interact() no es un método válido para interacciones de agentes.",
},
{
text: "run(query_str, ctx=ctx)",
explain: "Al pasar y mantener el contexto, podemos mantener el estado!",
correct: true
}
]}
/>
---
### Q4: ¿Qué es una característica clave de Agentic RAG?
<Question
choices={[
{
text: "Solo puede usar herramientas basadas en documentos para responder a preguntas en un flujo de trabajo RAG",
explain: "Agentic RAG puede usar diferentes herramientas, incluyendo herramientas basadas en documentos.",
},
{
text: "Responde a preguntas automáticamente sin herramientas, como un chatbot",
explain: "Agentic RAG utiliza herramientas para responder a preguntas.",
},
{
text: "Puede decidir usar cualquier herramienta para responder a preguntas, incluyendo herramientas RAG",
explain: "Agentic RAG tiene la flexibilidad de usar diferentes herramientas para responder a preguntas.",
correct: true
},
{
text: "Solo funciona con Agentes de Llamada de Funciones",
explain: "Agentic RAG no está limitado a Agentes de Llamada de Funciones.",
}
]}
/>
---
¿Entendido? ¡Genial! Ahora, ¡hagamos un **breve resumen de la unidad**!
| agents-course/units/es/unit2/llama-index/quiz2.mdx/0 | {
"file_path": "agents-course/units/es/unit2/llama-index/quiz2.mdx",
"repo_id": "agents-course",
"token_count": 1176
} | 8 |
# Creando tu Agente para la Gala
Ahora que hemos construido todos los componentes necesarios para Alfred, es momento de unirlos en un agente completo que pueda ayudar a organizar nuestra extravagante gala.
En esta sección, combinaremos la recuperación de información de invitados, búsqueda web, información meteorológica y herramientas de estadísticas de Hub en un solo agente poderoso.
## Ensamblando a Alfred: El Agente Completo
En lugar de reimplementar todas las herramientas que creamos en secciones anteriores, las importaremos desde sus respectivos módulos que guardamos en los archivos `tools.py` y `retriever.py`.
<Tip>
Si aún no has implementado las herramientas, regresa a las secciones de <a href="./tools">herramientas</a> y <a href="./invitees">recuperador</a> para implementarlas, y agrégalas a los archivos <code>tools.py</code> y <code>retriever.py</code>.
</Tip>
Importemos las bibliotecas y herramientas necesarias de las secciones anteriores:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
# Importar librerías necesarias
import random
from smolagents import CodeAgent, InferenceClientModel
# Importar nuestras herramientas personalizadas desde sus módulos
from tools import DuckDuckGoSearchTool, WeatherInfoTool, HubStatsTool
from retriever import load_guest_dataset
```
Ahora, combinemos todas estas herramientas en un solo agente:
```python
# Inicializar el modelo de Hugging Face
model = InferenceClientModel()
# Inicializar la herramienta de búsqueda web
search_tool = DuckDuckGoSearchTool()
# Inicializar la herramienta del clima
weather_info_tool = WeatherInfoTool()
# Inicializar la herramienta de estadísticas de Hub
hub_stats_tool = HubStatsTool()
# Cargar el conjunto de datos de invitados e inicializar la herramienta de información de invitados
guest_info_tool = load_guest_dataset()
# Crear a Alfred con todas las herramientas
alfred = CodeAgent(
tools=[guest_info_tool, weather_info_tool, hub_stats_tool, search_tool],
model=model,
add_base_tools=True, # Agregar cualquier herramienta base adicional
planning_interval=3 # Habilitar planificación cada 3 pasos
)
```
</hfoption>
<hfoption id="llama-index">
```python
# Importar librerías necesarias
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from tools import search_tool, weather_info_tool, hub_stats_tool
from retriever import guest_info_tool
```
Ahora, combinemos todas estas herramientas en un solo agente:
```python
# Inicializar el modelo de Hugging Face
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
# Crear Alfred con todas las herramientas
alfred = AgentWorkflow.from_tools_or_functions(
[guest_info_tool, search_tool, weather_info_tool, hub_stats_tool],
llm=llm,
)
```
</hfoption>
<hfoption id="langgraph">
```python
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from tools import DuckDuckGoSearchTool, weather_info_tool, hub_stats_tool
from retriever import load_guest_dataset
```
Ahora, combinemos todas estas herramientas en un solo agente:
```python
# Inicializar la herramienta de búsqueda web
search_tool = DuckDuckGoSearchTool()
# Cargar el conjunto de datos de invitados e inicializar la herramienta de información de invitados
guest_info_tool = load_guest_dataset()
# Generar la interfaz de chat, incluyendo las herramientas
llm = HuggingFaceEndpoint(
repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
)
chat = ChatHuggingFace(llm=llm, verbose=True)
tools = [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool]
chat_with_tools = chat.bind_tools(tools)
# Generar el AgentState y el grafo del Agente
class AgentState(TypedDict):
messages: Annotated[list[AnyMessage], add_messages]
def assistant(state: AgentState):
return {
"messages": [chat_with_tools.invoke(state["messages"])],
}
## El grafo
builder = StateGraph(AgentState)
# Definir nodos: estos hacen el trabajo
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Definir aristas: estas determinan cómo se mueve el flujo de control
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# Si el último mensaje requiere una herramienta, enrutar a herramientas
# De lo contrario, proporcionar una respuesta directa
tools_condition,
)
builder.add_edge("tools", "assistant")
alfred = builder.compile()
```
</hfoption>
</hfoptions>
¡Tu agente ahora está listo para usarse!
## Usando a Alfred: Ejemplos de Principio a Fin
Ahora que Alfred está completamente equipado con todas las herramientas necesarias, veamos cómo puede ayudar con varias tareas durante la gala.
### Ejemplo 1: Encontrando Información de Invitados
Veamos cómo Alfred puede ayudarnos con nuestra información de invitados.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "Cuéntame sobre 'Lady Ada Lovelace'"
response = alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
Según la información que recuperé, Lady Ada Lovelace es una estimada matemática y amiga. Es reconocida por su trabajo pionero en matemáticas y computación, frecuentemente celebrada como la primera programadora de computadoras debido a su trabajo en el Motor Analítico de Charles Babbage. Su dirección de correo electrónico es ada.lovelace@example.com.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "Cuéntame sobre Lady Ada Lovelace. ¿Cuál es su historia?"
response = await alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response.response.blocks[0].text)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
Lady Ada Lovelace fue una matemática y escritora inglesa, mejor conocida por su trabajo en el Motor Analítico de Charles Babbage. Fue la primera en reconocer que la máquina tenía aplicaciones más allá del cálculo puro.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "Cuéntame sobre 'Lady Ada Lovelace'"})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
Ada Lovelace, también conocida como Augusta Ada King, Condesa de Lovelace, fue una matemática y escritora inglesa. Nacida el 10 de diciembre de 1815 y fallecida el 27 de noviembre de 1852, es reconocida por su trabajo en el Motor Analítico de Charles Babbage, una computadora mecánica de propósito general. Ada Lovelace es celebrada como una de las primeras programadoras de computadoras porque creó un programa para el Motor Analítico en 1843. Reconoció que la máquina podía usarse para más que simples cálculos, visualizando su potencial de una manera que pocos hicieron en ese momento. Sus contribuciones al campo de la ciencia de la computación sentaron las bases para desarrollos futuros. Un día en octubre, designado como el Día de Ada Lovelace, honra las contribuciones de las mujeres a la ciencia y la tecnología, inspirado en el trabajo pionero de Lovelace.
```
</hfoption>
</hfoptions>
### Ejemplo 2: Verificando el Clima para los Fuegos Artificiales
Veamos cómo Alfred puede ayudarnos con el clima.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "¿Cómo está el clima en París esta noche? ¿Será adecuado para nuestro espectáculo de fuegos artificiales?"
response = alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada (variará debido a la aleatoriedad):
```
🎩 Respuesta de Alfred:
He revisado el clima en París para ti. Actualmente, está despejado con una temperatura de 25°C. Estas condiciones son perfectas para el espectáculo de fuegos artificiales de esta noche. Los cielos despejados proporcionarán una excelente visibilidad para el espectáculo espectacular, y la temperatura agradable asegurará que los invitados puedan disfrutar del evento al aire libre sin incomodidad.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "¿Cómo está el clima en París esta noche? ¿Será adecuado para nuestro espectáculo de fuegos artificiales?"
response = await alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
El clima en París esta noche es lluvioso con una temperatura de 15°C. Debido a la lluvia, puede que no sea adecuado para un espectáculo de fuegos artificiales.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "¿Cómo está el clima en París esta noche? ¿Será adecuado para nuestro espectáculo de fuegos artificiales?"})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
El clima en París esta noche es lluvioso con una temperatura de 15°C, lo que puede no ser adecuado para tu espectáculo de fuegos artificiales.
```
</hfoption>
</hfoptions>
### Ejemplo 3: Impresionando a Investigadores de IA
Veamos cómo Alfred puede ayudarnos a impresionar a los investigadores de IA.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "Uno de nuestros invitados es de Qwen. ¿Qué puedes decirme sobre su modelo más popular?"
response = alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
El modelo más popular de Qwen es Qwen/Qwen2.5-VL-7B-Instruct con 3,313,345 descargas.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "Uno de nuestros invitados es de Google. ¿Qué puedes decirme sobre su modelo más popular?"
response = await alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
El modelo más popular de Google en Hugging Face Hub es google/electra-base-discriminator, con 28,546,752 descargas.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "Uno de nuestros invitados es de Qwen. ¿Qué puedes decirme sobre su modelo más popular?"})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
El modelo más descargado de Qwen es Qwen/Qwen2.5-VL-7B-Instruct con 3,313,345 descargas.
```
</hfoption>
</hfoptions>
### Ejemplo 4: Combinando Múltiples Herramientas
Veamos cómo Alfred puede ayudarnos a prepararnos para una conversación con el Dr. Nikola Tesla.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "Necesito hablar con el Dr. Nikola Tesla sobre avances recientes en energía inalámbrica. ¿Puedes ayudarme a prepararme para esta conversación?"
response = alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
He reunido información para ayudarte a prepararte para tu conversación con el Dr. Nikola Tesla.
Información del Invitado:
Nombre: Dr. Nikola Tesla
Relación: viejo amigo de los días universitarios
Descripción: El Dr. Nikola Tesla es un viejo amigo de tus días universitarios. Recientemente ha patentado un nuevo sistema de transmisión de energía inalámbrica y estaría encantado de discutirlo contigo. Solo recuerda que le apasionan las palomas, así que eso podría ser un buen tema para iniciar la conversación.
Email: nikola.tesla@gmail.com
Avances Recientes en Energía Inalámbrica:
Basándome en mi búsqueda web, aquí hay algunos desarrollos recientes en transmisión de energía inalámbrica:
1. Los investigadores han avanzado en la transmisión de energía inalámbrica de largo alcance utilizando ondas electromagnéticas enfocadas
2. Varias empresas están desarrollando tecnologías de acoplamiento inductivo resonante para electrónica de consumo
3. Hay nuevas aplicaciones en la carga de vehículos eléctricos sin conexiones físicas
Iniciadores de Conversación:
1. "Me encantaría conocer tu nueva patente sobre transmisión de energía inalámbrica. ¿Cómo se compara con tus conceptos originales de nuestros días universitarios?"
2. "¿Has visto los desarrollos recientes en acoplamiento inductivo resonante para electrónica de consumo? ¿Qué opinas de su enfoque?"
3. "¿Cómo están tus palomas? Recuerdo tu fascinación por ellas."
Esto debería darte mucho para discutir con el Dr. Tesla mientras demuestras tu conocimiento de sus intereses y desarrollos recientes en su campo.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "Necesito hablar con el Dr. Nikola Tesla sobre avances recientes en energía inalámbrica. ¿Puedes ayudarme a prepararme para esta conversación?"
response = await alfred.run(query)
print("🎩 Respuesta de Alfred:")
print(response)
```
Salida esperada:
```
🎩 Respuesta de Alfred:
Aquí hay algunos avances recientes en energía inalámbrica que podrían serte útiles para tu conversación con el Dr. Nikola Tesla:
1. **Avances y Desafíos en la Transferencia de Energía Inalámbrica**: Este artículo analiza la evolución de la transferencia de energía inalámbrica (WPT) desde los métodos convencionales con cables hasta las aplicaciones modernas, incluidas las estaciones de energía espacial solar. Destaca el enfoque inicial en la tecnología de microondas y la demanda actual de WPT debido al aumento de dispositivos eléctricos.
2. **Avances Recientes en Tecnologías de Transferencia de Energía Inalámbrica para Electrónica Corporal**: Este artículo explora la transferencia de energía inalámbrica (WET) como solución para alimentar dispositivos electrónicos corporales sin necesidad de baterías o cables conductores. Analiza las ventajas y posibles aplicaciones de WET en este contexto.
3. **Transferencia de Energía Inalámbrica y Captación de Energía: Estado Actual y Tendencias Futuras**: Este artículo proporciona una visión general de los avances recientes en métodos de suministro de energía inalámbrica, incluida la captación de energía y la transferencia de energía inalámbrica. Presenta varias aplicaciones prometedoras y analiza las tendencias futuras en el campo.
4. **Transferencia de Energía Inalámbrica: Aplicaciones, Desafíos, Barreras y
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages":"Necesito hablar con el 'Dr. Nikola Tesla' sobre avances recientes en energía inalámbrica. ¿Puedes ayudarme a prepararme para esta conversación?"})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
```
Salida esperada:
```
Basándome en la información proporcionada, aquí hay puntos clave para prepararte para la conversación con el 'Dr. Nikola Tesla' sobre avances recientes en energía inalámbrica:
1. **Transmisión de Energía Inalámbrica (WPT):** Comenta cómo WPT revoluciona la transferencia de energía al eliminar la necesidad de cables y aprovechar mecanismos como el acoplamiento inductivo y resonante.
2. **Avances en Carga Inalámbrica:** Destaca las mejoras en eficiencia, velocidades de carga más rápidas y el auge de soluciones de carga inalámbrica certificadas Qi/Qi2.
3. **Innovaciones 5G-Advanced y Protocolo Inalámbrico NearLink:** Menciona estos como desarrollos que mejoran la velocidad, seguridad y eficiencia en redes inalámbricas, que pueden soportar tecnologías avanzadas de energía inalámbrica.
4. **IA y ML en el Edge:** Habla sobre cómo la inteligencia artificial y el aprendizaje automático dependerán de redes inalámbricas para llevar inteligencia al borde, mejorando la automatización e inteligencia en hogares y edificios inteligentes.
5. **Avances en Matter, Thread y Seguridad:** Comenta estas innovaciones clave que impulsan la conectividad, eficiencia y seguridad en dispositivos y sistemas IoT.
6. **Avances en Tecnología de Carga Inalámbrica:** Incluye avances recientes o estudios, como el de la Universidad Nacional de Incheon, para respaldar los avances en carga inalámbrica.
```
</hfoption>
</hfoptions>
## Características Avanzadas: Memoria de Conversación
Para hacer que Alfred sea aún más útil durante la gala, podemos habilitar la memoria de conversación para que recuerde interacciones previas:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
# Crear Alfred con memoria de conversación
alfred_with_memory = CodeAgent(
tools=[guest_info_tool, weather_info_tool, hub_stats_tool, search_tool],
model=model,
add_base_tools=True,
planning_interval=3,
memory=True # Habilitar memoria de conversación
)
# Primera interacción
response1 = alfred_with_memory.run("Cuéntame sobre Lady Ada Lovelace.")
print("🎩 Primera Respuesta de Alfred:")
print(response1)
# Segunda interacción (haciendo referencia a la primera)
response2 = alfred_with_memory.run("¿En qué proyectos está trabajando actualmente?")
print("🎩 Segunda Respuesta de Alfred:")
print(response2)
```
</hfoption>
<hfoption id="llama-index">
```python
from llama_index.core.workflow import Context
alfred = AgentWorkflow.from_tools_or_functions(
[guest_info_tool, search_tool, weather_info_tool, hub_stats_tool],
llm=llm
)
# Recordando el estado
ctx = Context(alfred)
# Primera interacción
response1 = await alfred.run("Cuéntame sobre Lady Ada Lovelace.", ctx=ctx)
print("🎩 Primera Respuesta de Alfred:")
print(response1)
# Segunda interacción (haciendo referencia a la primera)
response2 = await alfred.run("¿En qué proyectos está trabajando actualmente?", ctx=ctx)
print("🎩 Segunda Respuesta de Alfred:")
print(response2)
```
</hfoption>
<hfoption id="langgraph">
```python
# Primera interacción
response = alfred.invoke({"messages": [HumanMessage(content="Cuéntame sobre 'Lady Ada Lovelace'. ¿Cuál es su historia y cómo está relacionada conmigo?")]})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
print()
# Segunda interacción (haciendo referencia a la primera)
response = alfred.invoke({"messages": response["messages"] + [HumanMessage(content="¿En qué proyectos está trabajando actualmente?")]})
print("🎩 Respuesta de Alfred:")
print(response['messages'][-1].content)
```
</hfoption>
</hfoptions>
## Conclusión
¡Felicitaciones! Has construido exitosamente a Alfred, un agente sofisticado equipado con múltiples herramientas para ayudar a organizar la gala más extravagante del siglo. Alfred ahora puede:
1. Recuperar información detallada sobre los invitados
2. Verificar las condiciones climáticas para planificar actividades al aire libre
3. Proporcionar información sobre influyentes creadores de IA y sus modelos
4. Buscar en la web la información más reciente
5. Mantener el contexto de la conversación con memoria
Con estas capacidades, Alfred está listo para asegurar que tu gala sea un éxito rotundo, impresionando a los invitados con atención personalizada e información actualizada.
| agents-course/units/es/unit3/agentic-rag/agent.mdx/0 | {
"file_path": "agents-course/units/es/unit3/agentic-rag/agent.mdx",
"repo_id": "agents-course",
"token_count": 7498
} | 9 |
# Introduction

Bienvenue dans cette première **Unité Bonus**, où vous apprendrez à **finetuner un LLM pour de l'appel de fonctions** (*function calling*).
En termes de LLM, l'appel de fonctions devient rapidement une technique *incontournable*.
L'idée est que, plutôt que de s'appuyer uniquement sur des approches basées sur des *prompts* comme nous l'avons fait dans l'Unité 1, l'appel de fonctions entraîne votre modèle à **prendre des actions et interpréter des observations pendant la phase d'entraînement**, rendant votre IA plus robuste.
> **Quand dois-je faire cette Unité Bonus ?**
>
> Cette section est **optionnelle** et plus avancée que l'Unité 1, donc n'hésitez pas à faire cette unité maintenant ou à la revisiter quand vos connaissances se seront davantage développées grâce à ce cours.
>
> Mais ne vous inquiétez pas, cette Unité Bonus est conçue pour avoir toutes les informations dont vous avez besoin, donc nous vous guiderons à travers chaque concept central du finetuning d'un modèle d'appel de fonctions même si vous n'avez pas encore appris le fonctionnement interne de ce type de finetuning.
La meilleure façon pour vous de pouvoir suivre cette Unité Bonus est de :
1. Savoir comment finetuner un modèle avec *Transformers*. Si ce n'est pas le cas [consultez cette page](https://huggingface.co/learn/nlp-course/fr/chapter3/1?fw=pt).
2. Savoir comment utiliser `SFTTrainer` de *TRL* pour finetuner un modèle. Pour en savoir plus à ce sujet [consultez cette documentation](https://huggingface.co/learn/nlp-course/en/chapter11/1).
---
## Ce que vous allez apprendre
1. **L'appel de fonctions** (*Function Calling*)
Comment les LLM modernes structurent leurs conversations de manière efficace afin de déclencher des **outils**.
2. **LoRA** (*Low-Rank Adaptation*)
Une méthode de finetuning **légère et efficace** qui réduit les coûts computationnels et de stockage. LoRA rend l'entraînement de gros modèles *plus rapide, moins cher et plus facile* à déployer.
3. **Le cycle Réflexion → Action → Observation** dans les modèles d'appel de fonctions
Une approche simple mais puissante pour structurer comment votre modèle décide quand (et comment) appeler des fonctions, suivre les étapes intermédiaires et interpréter les résultats des outils ou APIs externes.
4. **De nouveaux *tokens* spéciaux**
Nous introduirons des **marqueurs spéciaux** qui aident le modèle à distinguer entre :
- Le raisonnement interne "*chain-of-thought*"
- Les appels de fonctions sortants
- Les réponses provenant d'outils externes
---
À la fin de cette unité bonus, vous serez capable de :
- **Comprendre** le fonctionnement interne des APIs quand il s'agit d'outils.
- **Finetuner** un modèle en utilisant la technique LoRA.
- **Implémenter** et **modifier** le cycle Réflexion → Action → Observation pour créer des *workflows* d'appel de fonctions robustes et maintenables.
- **Concevoir et utiliser** des *tokens* spéciaux pour séparer de manière transparente le raisonnement interne du modèle de ses actions externes.
Et vous **aurez finetuné votre propre modèle pour faire de l'appel de fonctions.** 🔥
Plongeons dans **l'appel de fonctions** ! | agents-course/units/fr/bonus-unit1/introduction.mdx/0 | {
"file_path": "agents-course/units/fr/bonus-unit1/introduction.mdx",
"repo_id": "agents-course",
"token_count": 1201
} | 10 |
# Table des matières
Vous pouvez accéder à l'Unité 1 sur hf.co/learn 👉 <a href="https://hf.co/learn/agents-course/unit1/introduction">ici</a>
<!--
| Titre | Description |
|-------|-------------|
| [Définition d'un Agent](1_definition_of_an_agent.md) | Exemple général de ce que les agents peuvent faire sans jargon technique. |
| [Expliquer les LLMs](2_explain_llms.md) | Explication des modèles de langage de grande taille, y compris l'arbre généalogique des modèles et les modèles adaptés aux agents. |
| [Messages et Tokens Spéciaux](3_messages_and_special_tokens.md) | Explication des messages, des tokens spéciaux et de l'utilisation des modèles de conversation. |
| [Bibliothèque d'Agents Factices](4_dummy_agent_library.md) | Introduction à l'utilisation d'une bibliothèque d'agents factices et d'une API sans serveur. |
| [Outils](5_tools.md) | Aperçu de Pydantic pour les outils d'agents et d'autres formats d'outils courants. |
| [Étapes et Structure de l'Agent](6_agent_steps_and_structure.md) | Les étapes impliquées dans un agent, y compris le raisonnement, les actions, les observations, et une comparaison entre les agents basés sur le code et les agents basés sur JSON. |
| [Raisonnement](7_thoughts.md) | Explication du raisonnement et de l'approche ReAct. |
| [Actions](8_actions.md) | Aperçu des actions et de l'approche "stop and parse". |
| [Observations](9_observations.md) | Explication des observations et de l'ajout du résultat pour réflexion. |
| [Quiz](10_quizz.md) | Contient des quiz pour tester la compréhension des concepts. |
| [Cas d'Utilisation Simple](11_simple_use_case.md) | Fournit un exercice de cas d'utilisation simple utilisant datetime et une fonction Python comme outil. |
-->
| agents-course/units/fr/unit1/README.md/0 | {
"file_path": "agents-course/units/fr/unit1/README.md",
"repo_id": "agents-course",
"token_count": 600
} | 11 |
# Introduction aux frameworks agentiques
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/thumbnail.jpg" alt="Thumbnail"/>
Bienvenue dans cette deuxième unité, où **nous explorerons différents frameworks agentiques** qui peuvent être utilisés pour construire de puissantes applications agentiques.
Nous étudierons :
- Dans l'unité 2.1 : [smolagents](https://huggingface.co/docs/smolagents/en/index)
- Dans l'unité 2.2 : [LlamaIndex](https://www.llamaindex.ai/)
- Dans l'unité 2.3 : [LangGraph](https://www.langchain.com/langgraph)
Plongeons-y ! 🕵
## Quand utiliser un *framework* agentique
Parfois, **des *workflows* prédéfinis sont suffisants** pour répondre aux demandes des utilisateurs, et il n'y a pas vraiment besoin d'un *framework* agentique. Si l'approche pour construire un agent est simple, comme une chaîne de *prompts*, utiliser du code simple peut suffire. L'avantage est que le développeur aura **un contrôle total et une compréhension de son système sans abstractions**.
Cependant, lorsque le *workflow* devient plus complexe, comme laisser un LLM appeler des fonctions ou utiliser plusieurs agents, ces abstractions commencent à devenir utiles.
En considérant ces idées, nous pouvons déjà identifier le besoin de certaines fonctionnalités :
* Un *moteur LLM* qui alimente le système.
* Une *liste d'outils* auxquels l'agent peut accéder.
* Un *analyseur* pour extraire les appels d'outils de la sortie du LLM.
* Un *prompt système* synchronisé avec l'analyseur.
* Un *système de mémoire*.
* Des *mécanismes de journalisation des erreurs et de réessai* pour contrôler les erreurs du LLM.
Nous explorerons comment ces sujets sont résolus dans divers *frameworks* comme `smolagents`, `LlamaIndex` et `LangGraph`.
## Unités des frameworks agentiques
| *Framework* | Description | Auteur de l'unité |
|------------|----------------|----------------|
| [smolagents](./smolagents/introduction) | *Framework* d'agents développé par Hugging Face. | Sergio PANIEGO - [HF](https://huggingface.co/sergiopaniego) - [X](https://x.com/sergiopaniego) - [Linkedin](https://www.linkedin.com/in/sergio-paniego-blanco) |
| [Llama-Index](./llama-index/introduction) | Outils de bout en bout pour déployer un agent IA augmenté par le contexte en production | David BERENSTEIN - [HF](https://huggingface.co/davidberenstein1957) - [X](https://x.com/davidberenstei) - [Linkedin](https://www.linkedin.com/in/davidberenstein) |
| [LangGraph](./langgraph/introduction) | Agents permettant l'orchestration étatique des agents | Joffrey THOMAS - [HF](https://huggingface.co/Jofthomas) - [X](https://x.com/Jthmas404) - [Linkedin](https://www.linkedin.com/in/joffrey-thomas) | | agents-course/units/fr/unit2/introduction.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/introduction.mdx",
"repo_id": "agents-course",
"token_count": 991
} | 12 |
# Utiliser les outils dans LlamaIndex
**Définir un ensemble clair d'outils est crucial pour la performance.** Comme nous l'avons discuté dans l'[Unité 1](../../unit1/tools), des interfaces claires sont plus faciles à utiliser pour les LLM.
Tout comme une interface API logicielle facilite le travail des ingénieurs humains, les LLM peuvent tirer profit d'un outil s'il est facile de comprendre comment il fonctionne.
Il y a **quatre types principaux d'outils dans LlamaIndex** :

1. `FunctionTool` : Convertit n'importe quelle fonction Python en un outil qu'un agent peut utiliser. Il comprend automatiquement comment la fonction fonctionne.
2. `QueryEngineTool` : Un outil qui permet aux agents d'utiliser des *query engines*. Puisque les agents sont construits sur des *query engines*, ils peuvent également utiliser d'autres agents comme outil.
3. `Toolspecs` : Ensembles d'outils créés par la communauté, incluant souvent des outils pour des services spécifiques comme Gmail.
4. `Utility Tools` : Outils spéciaux qui aident à gérer de grandes quantités de données d'autres outils.
Nous passerons en revue chacun d'eux plus en détail ci-dessous.
## Créer un *FunctionTool*
<Tip>
Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/llama-index/tools.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab.
</Tip>
*FunctionTool* fournit un moyen simple d'envelopper n'importe quelle fonction Python et de la rendre disponible à un agent.
Vous pouvez passer soit une fonction synchrone soit asynchrone à l'outil, avec des paramètres optionnels `name` et `description`.
Le nom et la description sont particulièrement importants car ils aident l'agent à comprendre quand et comment utiliser l'outil efficacement.
Regardons comment créer un *FunctionTool* ci-dessous puis l'appeler.
```python
from llama_index.core.tools import FunctionTool
def get_weather(location: str) -> str:
"""Utile pour obtenir le temps qu'il fait à un endroit donné."""
print(f"Getting weather for {location}")
return f"The weather in {location} is sunny"
tool = FunctionTool.from_defaults(
get_weather,
name="my_weather_tool",
description="Useful for getting the weather for a given location.",
)
tool.call("New York")
```
<Tip>Lors de l'utilisation d'un agent ou d'un LLM avec l'appel de fonctions, l'outil sélectionné (et les arguments écrits pour celui-ci) dépendent fortement de son nom et de la description de son but et de ses arguments. Apprenez-en plus sur l'appel de fonctions dans le <a href="https://docs.llamaindex.ai/en/stable/examples/workflow/function_calling_agent/">Guide sur l'appel de fonctions</a>.</Tip>
## Créer un *QueryEngineTool*
Le `QueryEngine` que nous avons défini dans l'unité précédente peut être facilement transformé en un outil en utilisant la classe `QueryEngineTool`.
Voyons comment créer un `QueryEngineTool` à partir d'un `QueryEngine` dans l'exemple ci-dessous.
```python
from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
embed_model = HuggingFaceEmbedding("BAAI/bge-small-en-v1.5")
db = chromadb.PersistentClient(path="./alfred_chroma_db")
chroma_collection = db.get_or_create_collection("alfred")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
query_engine = index.as_query_engine(llm=llm)
tool = QueryEngineTool.from_defaults(query_engine, name="some useful name", description="some useful description")
```
## Créer un *Toolspecs*
Pensez au `ToolSpecs` comme des collections d'outils qui fonctionnent ensemble harmonieusement à l'instar d'une boîte à outils professionnelle bien organisée.
Tout comme la boîte à outils d'un mécanicien contient des outils complémentaires qui fonctionnent ensemble pour les réparations de véhicules, un `ToolSpec` combine des outils apparentés pour des objectifs spécifiques.
Par exemple, le `ToolSpec` d'un agent comptable pourrait intégrer élégamment des capacités de tableur, des fonctionnalités email et des outils de calcul pour gérer les tâches financières avec précision et efficacité.
<details>
<summary>Installer le <i>Toolspec</i> Google</summary>
Comme introduit dans la <a href="./llama-hub">section sur le <i>LlamaHub</i></a>, nous pouvons installer le <i>toolspec</i> Google avec la commande suivante :
```python
pip install llama-index-tools-google
```
</details>
Et maintenant nous pouvons charger le *toolspec* et le convertir en une liste d'outils.
```python
from llama_index.tools.google import GmailToolSpec
tool_spec = GmailToolSpec()
tool_spec_list = tool_spec.to_tool_list()
```
Pour obtenir une vue plus détaillée des outils, nous pouvons examiner les `metadata` de chacun d'eux.
```python
[(tool.metadata.name, tool.metadata.description) for tool in tool_spec_list]
```
### *Model Context Protocol* (MCP) dans LlamaIndex
LlamaIndex permet également d'utiliser les outils *MCP* grâce à un [*ToolSpec* sur le LlamaHub](https://llamahub.ai/l/tools/llama-index-tools-mcp?from=).
Vous pouvez simplement lancer un serveur MCP et commencer à l'utiliser grâce à l'implémentation suivante.
Si vous voulez approfondir MCP, vous pouvez consulter notre [cours gratuit](https://huggingface.co/learn/mcp-course/).
<details>
<summary>Installer le <i>Toolspec</i> MCP</summary>
Comme introduit dans la <a href="./llama-hub">section sur le LlamaHub</a>, nous pouvons installer le <i>toolspec</i> MCP avec la commande suivante :
```python
pip install llama-index-tools-mcp
```
</details>
```python
from llama_index.tools.mcp import BasicMCPClient, McpToolSpec
# Nous considérons qu'un serveur mcp tourne sur 127.0.0.1:8000, ou vous pouvez utiliser le client mcp pour vous connecter à votre propre serveur
mcp_client = BasicMCPClient("http://127.0.0.1:8000/sse")
mcp_tool = McpToolSpec(client=mcp_client)
# obtenir l'agent
agent = await get_agent(mcp_tool)
# créer le contexte de l'agent
agent_context = Context(agent)
```
## *Utility Tools*
Souvent, interroger directement une API **peut retourner une quantité excessive de données**, dont certaines peuvent être non pertinentes, déborder la fenêtre de contexte du LLM, ou augmenter inutilement le nombre de *tokens* que vous utilisez.
Passons en revue nos deux *utility tools* principaux ci-dessous.
1. `OnDemandToolLoader` : Cet outil transforme n'importe quel chargeur de données LlamaIndex existant (classe *BaseReader*) en un outil qu'un agent peut utiliser. Il peut être appelé avec tous les paramètres nécessaires pour déclencher `load_data` du chargeur de données, ainsi qu'une chaîne de requête en langage naturel. Pendant l'exécution, nous chargeons d'abord les données du chargeur de données, les indexons (par exemple avec un *vector store*), puis les interrogeons à la demande. Ces trois étapes se produisent dans un seul appel de l'outil.
2. `LoadAndSearchToolSpec` : Le *LoadAndSearchToolSpec* prend n'importe quel outil existant en entrée. En tant que *tool spec*, il implémente `to_tool_list`, et quand cette fonction est appelée, deux outils sont retournés : un outil de chargement puis un outil de recherche. L'exécution du premier appellerait l'outil sous-jacent, puis indexerait la sortie (par défaut avec un *vector index*). L'exécution du second prendrait une chaîne de requête en entrée et appellerait l'index sous-jacent.
<Tip>Vous pouvez trouver des <i>toolspecs</i> et des <i>utility tools</i> sur le <a href="https://llamahub.ai/">LlamaHub</a>.</Tip>
Maintenant que nous comprenons les bases des agents et des outils dans LlamaIndex, voyons comment nous pouvons **utiliser LlamaIndex pour créer des *workflows* configurables et gérables !**
| agents-course/units/fr/unit2/llama-index/tools.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/llama-index/tools.mdx",
"repo_id": "agents-course",
"token_count": 2873
} | 13 |
# RAG agentique
Dans cette unité, nous allons examiner comment nous pouvons utiliser le *RAG agentique* pour aider Alfred à préparer l'incroyable gala.
<Tip>Nous avons déjà discuté du RAG et du RAG agentique dans l'unité précédente, donc n'hésitez pas à passer directement à la suite si vous êtes déjà familier avec ces concepts.</Tip>
Les LLM sont entraînés sur d'énormes corpus de données pour apprendre des connaissances générales.
Cependant, cette connaissance du monde peut ne pas toujours contenir des informations pertinentes et à jour.
**Le RAG résout ce problème en trouvant et récupérant des informations pertinentes à partir de vos données et en les transmettant au LLM.**

Maintenant, pensez à comment Alfred fonctionne :
1. Nous lui avons demandé d'aider à planifier un gala
2. Il doit trouver les dernières nouvelles et informations météorologiques
3. Il doit structurer et rechercher les informations sur les invités
Tout comme Alfred a besoin de chercher dans vos informations domestiques pour être utile, tout agent a besoin d'un moyen de trouver et comprendre les données pertinentes.
**Le RAG agentique est un moyen puissant d'utiliser les agents pour répondre aux questions sur vos données.** Nous pouvons passer divers outils à Alfred pour l'aider à répondre aux questions.
Cependant, au lieu de répondre automatiquement à la question sur la base de documents, Alfred peut décider d'utiliser tout autre outil ou flux pour répondre à la question.

Commençons **à construire notre *workflow* de RAG agentique !**
Tout d'abord, nous allons créer un outil de RAG pour récupérer les détails à jour sur les invités. Ensuite, nous développerons des outils pour la recherche web, les mises à jour météorologiques et les statistiques de téléchargement de modèles sur le Hub d'Hugging Face. Enfin, nous intégrerons le tout pour donner vie à notre agent de RAG agentique ! | agents-course/units/fr/unit3/agentic-rag/agentic-rag.mdx/0 | {
"file_path": "agents-course/units/fr/unit3/agentic-rag/agentic-rag.mdx",
"repo_id": "agents-course",
"token_count": 742
} | 14 |
# 액션: 에이전트가 환경과 상호작용할 수 있게 하기 [[actions-enabling-the-agent-to-engage-with-its-environment]]
<Tip>
이 섹션에서는 AI 에이전트가 환경과 상호작용하기 위해 취하는 구체적인 단계를 살펴봅니다.
액션이 어떻게 표현되는지(JSON 또는 코드 사용), 중지 및 구문 분석 접근 방식의 중요성, 그리고 다양한 유형의 에이전트를 소개합니다.
</Tip>
액션은 **AI 에이전트가 환경과 상호작용하기 위해 취하는** 구체적인 단계입니다.
정보를 위해 웹을 검색하든 물리적 장치를 제어하든, 각 액션은 에이전트가 실행하는 의도적인 작업입니다.
예를 들어, 고객 서비스를 지원하는 에이전트는 고객 데이터를 검색하거나, 도움말 문서를 제공하거나, 문제를 인간 담당자에게 이관할 수 있습니다.
## 에이전트 액션의 유형 [[types-of-agent-actions]]
액션을 다르게 취하는 여러 유형의 에이전트가 있습니다:
| 에이전트 유형 | 설명 |
|--------------|------|
| JSON 에이전트 | 취할 액션이 JSON 형식으로 지정됩니다. |
| 코드 에이전트 | 에이전트가 외부에서 해석되는 코드 블록을 작성합니다. |
| 함수 호출 에이전트 | JSON 에이전트의 하위 카테고리로, 각 액션마다 새로운 메시지를 생성하도록 미세 조정되었습니다. |
액션 자체는 다양한 목적을 가질 수 있습니다:
| 액션 유형 | 설명 |
|----------|------|
| 정보 수집 | 웹 검색 수행, 데이터베이스 쿼리, 문서 검색 등 |
| 도구 사용 | API 호출, 계산 실행, 코드 실행 |
| 환경 상호작용 | 디지털 인터페이스 조작 또는 물리적 장치 제어 |
| 의사소통 | 채팅을 통한 사용자와의 상호작용 또는 다른 에이전트와의 협업 |
모든 형식의 에이전트(JSON, 코드, 함수 호출)에 있어 중요한 부분은 **액션이 완료되면 새로운 토큰 생성을 중지하는 능력**입니다. 이는 의도하지 않은 출력을 방지하고 에이전트의 응답이 명확하고 정확하도록 보장합니다.
LLM은 텍스트만 처리하며 이를 사용하여 취하고자 하는 액션과 도구에 제공할 매개변수를 설명합니다.
## 중지 및 구문 분석 접근 방식 [[the-stop-and-parse-approach]]
액션을 구현하는 핵심 방법 중 하나는 **중지 및 구문 분석 접근 방식**입니다. 이 방법은 에이전트의 출력이 구조화되고 예측 가능하도록 보장합니다:
1. **구조화된 형식으로 생성**:
에이전트는 의도한 액션을 명확하고 미리 정의된 형식(JSON 또는 코드)으로 출력합니다.
2. **추가 생성 중지**:
액션이 완료되면 **에이전트는 추가 토큰 생성을 중지**합니다. 이는 불필요하거나 오류가 있는 출력을 방지합니다.
3. **출력 구문 분석**:
외부 파서가 형식화된 액션을 읽고, 어떤 도구를 호출할지 결정하며, 필요한 매개변수를 추출합니다.
예를 들어, 날씨를 확인해야 하는 에이전트는 다음과 같이 출력할 수 있습니다:
```json
Thought: 서울의 현재 날씨를 확인해야 합니다.
Action :
{
"action": "get_weather",
"action_input": {"location": "Seoul"}
}
```
프레임워크는 호출할 함수의 이름과 적용할 인자를 쉽게 구문 분석할 수 있습니다.
이 명확하고 기계가 읽을 수 있는 형식은 오류를 최소화하고 외부 도구가 에이전트의 명령을 정확하게 처리할 수 있게 합니다.
참고: 함수 호출 에이전트는 각 액션을 구조화하여 지정된 함수가 올바른 인수와 함께 호출되도록 하는 비슷한 방식으로 작동합니다.
이러한 유형의 에이전트에 대해서는 향후 유닛에서 더 자세히 살펴볼 것입니다.
## 코드 에이전트 [[code-agents]]
대안적인 접근 방식은 *코드 에이전트*를 사용하는 것입니다.
핵심 아이디어는 **단순한 JSON 객체를 출력하는 대신**, 코드 에이전트가 **실행 가능한 코드 블록(일반적으로 Python과 같은 고수준 언어로 작성)**을 생성한다는 것입니다.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/code-vs-json-actions.png" alt="코드 에이전트" />
이 접근 방식은 여러 장점을 제공합니다:
- **표현력:** 코드는 루프, 조건문, 중첩 함수를 포함한 복잡한 로직을 자연스럽게 표현할 수 있어 JSON보다 더 큰 유연성을 제공합니다.
- **모듈성 및 재사용성:** 생성된 코드는 다양한 액션이나 작업에서 재사용할 수 있는 함수와 모듈을 포함할 수 있습니다.
- **향상된 디버깅 가능성:** 잘 정의된 프로그래밍 구문을 통해 코드 오류를 감지하고 수정하기가 더 쉬운 경우가 많습니다.
- **직접 통합:** 코드 에이전트는 외부 라이브러리 및 API와 직접 통합할 수 있어 데이터 처리나 실시간 의사 결정과 같은 복잡한 작업이 가능합니다.
예를 들어, 날씨 정보를 가져오는 임무를 맡은 코드 에이전트는 다음과 같은 Python 코드 조각을 생성할 수 있습니다:
```python
# 코드 에이전트 예시: 날씨 정보 검색
def get_weather(city):
import requests
api_url = f"https://api.weather.com/v1/location/{city}?apiKey=YOUR_API_KEY"
response = requests.get(api_url)
if response.status_code == 200:
data = response.json()
return data.get("weather", "날씨 정보가 없습니다")
else:
return "오류: 날씨 데이터를 가져올 수 없습니다."
# 함수 실행 및 최종 답변 준비
result = get_weather("Seoul")
final_answer = f"서울의 현재 날씨는: {result}"
print(final_answer)
```
이 예시에서 코드 에이전트는:
- **API 호출**을 통해 날씨 데이터를 검색하고,
- 응답을 처리하며,
- print() 함수를 사용하여 최종 답변을 출력합니다.
이 방법 **또한 중지 및 구문 분석 접근 방식**을 따르며, 코드 블록을 명확하게 구분하고 실행이 완료되었음을 신호합니다(여기서는 final_answer를 출력함으로써).
---
액션은 JSON, 코드 또는 함수 호출을 통해 명확하고 구조화된 작업을 실행함으로써 에이전트의 내부 추론과 실제 상호작용을 연결한다는 점을 배웠습니다.
이러한 의도적인 실행은 각 액션이 정확하고 중지 및 구문 분석 접근 방식을 통해 외부 처리가 가능하도록 보장합니다. 다음 섹션에서는 에이전트가 환경으로부터 피드백을 캡처하고 통합하는 방법을 알아보기 위해 관찰(Observations)에 대해 살펴볼 것입니다.
이후에는 **드디어 우리의 첫 번째 에이전트를 구축할 준비가 됩니다!**
| agents-course/units/ko/unit1/actions.mdx/0 | {
"file_path": "agents-course/units/ko/unit1/actions.mdx",
"repo_id": "agents-course",
"token_count": 5570
} | 15 |
# Заключение [[conclusion]]
Поздравляем вас с завершением этого первого бонусного раздела 🥳.
Вы только что **овладели пониманием вызова функций и тем, как дообучить свою модель вызову функций**!
Если у нас и есть теперь совет, то это попробовать **дообучить другие модели**. **Лучший способ учиться - это пробовать**.
В следующем разделе вы узнаете, как использовать **передовые фреймворки, такие как `smolagents`, `LlamaIndex` и `LangGraph`**.
Наконец, мы хотели бы **узнать, что вы думаете о курсе и как мы можем его улучшить**. Если у вас есть обратная связь, пожалуйста, 👉 [заполните эту форму](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog)
### Продолжайте учиться, оставайтесь потрясающими 🤗 | agents-course/units/ru-RU/bonus-unit1/conclusion.mdx/0 | {
"file_path": "agents-course/units/ru-RU/bonus-unit1/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 733
} | 16 |
# Введение в Агентов
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Thumbnail"/>
Добро пожаловать в первый раздел, где **вы заложите прочный фундамент основ ИИ-агентов**, включая:
- **Понимание агентов**
- Что такое агент и как он работает?
- Как агенты принимают решения, используя рассуждения и планирование?
- **Роль LLM (Large Language Models - больших языковых моделей) в агентах**.
- Как LLM служат «мозгом» агента.
- Как LLM структурируют диалоги с помощью системы сообщений.
- **Инструменты (Tools) и Действия (Actions)**.
- Как агенты используют внешние инструменты для взаимодействия с окружающей средой.
- Как создавать и интегрировать инструменты для своего агента.
- **Процесс работы агента (Agent Workflow):**
- *Думать* → *Действовать* → *Наблюдать*.
Изучив эти темы, **вы создадите своего первого агента**, используя `smolagents`!
Ваш агент по имени Альфред справится с простым заданием и продемонстрирует, как применять эти понятия на практике.
Вы даже узнаете, как **опубликовать своего агента в Hugging Face Spaces**, чтобы поделиться им с друзьями и коллегами.
Наконец, в конце этого раздела вы пройдете тест. Пройдя его, вы **получите свой первый сертификат курса**: 🎓 Сертификат по основам работы с агентами.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Certificate Example"/>
Этот раздел - ваша **основная отправная точка**, закладывающая основу для понимания Агентов, прежде чем вы перейдете к более сложным темам.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Unit 1 planning"/>
Это большой раздел, поэтому **не торопитесь** и не стесняйтесь время от времени возвращаться к нему.
Готовы? Погружаемся! 🚀
| agents-course/units/ru-RU/unit1/introduction.mdx/0 | {
"file_path": "agents-course/units/ru-RU/unit1/introduction.mdx",
"repo_id": "agents-course",
"token_count": 1625
} | 17 |
# Khi nào các chương tiếp theo được công bố?
Đây là lịch công bố:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/next-units.jpg" alt="Next Units" width="100%"/>
Đừng quên <a href="https://bit.ly/hf-learn-agents">đăng ký khóa học</a>! Khi đăng ký, **chúng mình sẽ gửi bạn link từng chương khi chúng được công bố, cùng thông tin cập nhật và chi tiết về các Bài tập lớn sắp tới**.
Phấn đấu học tập không ngừng 🤗 | agents-course/units/vi/communication/next-units.mdx/0 | {
"file_path": "agents-course/units/vi/communication/next-units.mdx",
"repo_id": "agents-course",
"token_count": 319
} | 18 |
# Hãy tạo Agent đầu tiên của chúng ta với smolagents
Ở chương trước, ta đã học cách tạo Agent từ đầu bằng Python và **thấy quá trình này tốn công thế nào**. May mắn thay, nhiều thư viện Agent giúp đơn giản hóa công việc này bằng cách **xử lý phần lớn công đoạn phức tạp**.
Trong bài thực hành này, **bạn sẽ tạo Agent đầu tiên của riêng mình** có khả năng thực hiện các hành động như tạo ảnh, tìm kiếm web, kiểm tra múi giờ và hơn thế nữa!
Bạn cũng sẽ publish agent **trên Hugging Face Space để chia sẻ với bạn bè và đồng nghiệp**.
Cùng bắt đầu thôi!
## smolagents là gì?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/smolagents.png" alt="smolagents"/>
Để tạo Agent này, ta sẽ dùng `smolagents` - thư viện **cung cấp framework để phát triển agent dễ dàng**.
Thư viện nhẹ này được thiết kế cho sự đơn giản, nhưng nó đóng gói phần lớn độ phức tạp khi xây dựng Agent, giúp bạn tập trung vào thiết kế hành vi cho agent.
Ta sẽ tìm hiểu sâu hơn về smolagents ở chương tiếp theo. Trong lúc chờ, bạn có thể xem <a href="https://huggingface.co/blog/smolagents" target="_blank">blog post</a> hoặc <a href="https://github.com/huggingface/smolagents" target="_blank">repo GitHub</a> của thư viện.
Tóm lại, `smolagents` là thư viện tập trung vào **codeAgent** - loại agent thực hiện **"Hành động"** qua các khối code, sau đó **"Quan sát"** kết quả bằng cách chạy code.
Đây là ví dụ những gì ta sẽ xây dựng!
Ta cung cấp cho agent **công cụ tạo ảnh** và yêu cầu nó tạo ảnh mèo.
Agent trong `smolagents` sẽ có **hành vi giống với agent tự build trước đây**: nó sẽ **tư duy, hành động và quan sát theo chu kỳ** cho đến khi có câu trả lời cuối:
<iframe width="560" height="315" src="https://www.youtube.com/embed/PQDKcWiuln4?si=ysSTDZoi8y55FVvA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
Thú vị quá phải không?
## Cùng build Agent thôi!
Để bắt đầu, duplicate Space này: <a href="https://huggingface.co/spaces/agents-course/First_agent_template" target="_blank">https://huggingface.co/spaces/agents-course/First_agent_template</a>
> Cảm ơn <a href="https://huggingface.co/m-ric" target="_blank">Aymeric</a> đã tạo template này! 🙌
Duplicate Space nghĩa là **tạo bản copy local trên profile của bạn**:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/duplicate-space.gif" alt="Duplicate"/>
Xuyên suốt Bài học này, file duy nhất bạn cần sửa là **"app.py"** (hiện đang chưa hoàn thiện). Bạn có thể xem [bản gốc trong template](https://huggingface.co/spaces/agents-course/First_agent_template/blob/main/app.py). Để tìm bản của bạn, vào bản copy Space, click tab `Files` rồi chọn `app.py` trong danh sách.
Cùng phân tích code nhé:
- File bắt đầu với các thư viện cần thiết
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
```
Như đã đề cập, ta sẽ dùng trực tiếp lớp **CodeAgent** từ **smolagents**.
### Các Tools
Giờ đến phần Tools! Nếu cần ôn lại về Tools, hãy xem lại [phần Tools](tools) của khóa học.
```python
@tool
def my_custom_tool(arg1:str, arg2:int)-> str: # quan trọng phải chỉ định kiểu trả về
# Giữ nguyên định dạng này cho mô tả công cụ/mô tả đối số nhưng hãy thoải mái sửa đổi công cụ
"""Công cụ chưa làm gì cả
Args:
arg1: đối số đầu tiên
arg2: đối số thứ hai
"""
return "Bạn sẽ tạo ra phép thuật gì đây?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""Công cụ lấy giờ hiện tại theo múi giờ chỉ định.
Args:
timezone: Chuỗi biểu diễn múi giờ hợp lệ (ví dụ: 'America/New_York').
"""
try:
# Tạo object múi giờ
tz = pytz.timezone(timezone)
# Lấy giờ hiện tại theo múi giờ đó
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"Giờ hiện tại tại {timezone} là: {local_time}"
except Exception as e:
return f"Lỗi khi lấy giờ cho múi giờ '{timezone}': {str(e)}"
```
Đây chính là phần chúng mình khuyến khích bạn xây dựng! Chúng mình cung cấp hai ví dụ:
1. **Tool ảo** chưa hoạt động để bạn có thể sửa thành tool hữu ích.
2. **Tool thực sự hoạt động** để lấy giờ hiện tại ở bất kỳ đâu.
Khi định nghĩa tool, quan trọng phải:
1. Chỉ định kiểu đầu vào/ra cho hàm, ví dụ `get_current_time_in_timezone(timezone: str) -> str:`
2. **Docstring định dạng chuẩn**. `smolagents` yêu cầu mọi đối số phải có **mô tả bằng text trong docstring**.
### Agent
Agent sử dụng [`Qwen/Qwen2.5-Coder-32B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) làm LLM engine. Đây là mô hình mạnh mẽ mà ta sẽ truy cập qua serverless API.
```python
final_answer = FinalAnswerTool()
model = InferenceClientModel(
max_tokens=2096,
temperature=0.5,
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
custom_role_conversions=None,
)
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
# Tạo CodeAgent
agent = CodeAgent(
model=model,
tools=[final_answer], # thêm tools của bạn vào đây (đừng xóa final_answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
GradioUI(agent).launch()
```
Agent này vẫn dùng `InferenceClient` mà ta đã thấy ở phần trước thông qua lớp **InferenceClientModel**!
Chúng mình sẽ đưa thêm ví dụ chi tiết khi giới thiệu framework ở chương 2. Hiện tại, bạn cần tập trung vào **thêm tool mới vào danh sách tools** qua tham số `tools` của Agent.
Ví dụ bạn có thể dùng `DuckDuckGoSearchTool` đã được import ở dòng đầu, hoặc xem qua `image_generation_tool` được load từ Hub ở phần sau.
**Thêm tools sẽ mở rộng khả năng cho agent** - hãy sáng tạo nhé!
Toàn bộ "app.py":
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# Đây là ví dụ tool chưa làm gì. Hãy khiến chúng mình kinh ngạc với sáng tạo của bạn!
@tool
def my_custom_tool(arg1:str, arg2:int)-> str: # quan trọng phải chỉ định kiểu trả về
# Giữ nguyên định dạng này cho mô tả công cụ/mô tả đối số nhưng hãy thoải mái sửa đổi công cụ
"""Công cụ chưa làm gì cả
Args:
arg1: đối số đầu tiên
arg2: đối số thứ hai
"""
return "Bạn sẽ tạo ra phép thuật gì đây?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
"""Công cụ lấy giờ hiện tại theo múi giờ chỉ định.
Args:
timezone: Chuỗi biểu diễn múi giờ hợp lệ (ví dụ: 'America/New_York').
"""
try:
# Tạo object múi giờ
tz = pytz.timezone(timezone)
# Lấy giờ hiện tại theo múi giờ đó
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
return f"Giờ hiện tại tại {timezone} là: {local_time}"
except Exception as e:
return f"Lỗi khi lấy giờ cho múi giờ '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
model = InferenceClientModel(
max_tokens=2096,
temperature=0.5,
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
custom_role_conversions=None,
)
# Import tool từ Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
model=model,
tools=[final_answer], # thêm tools của bạn vào đây (đừng xóa final_answer)
max_steps=6,
verbosity_level=1,
grammar=None,
planning_interval=None,
name=None,
description=None,
prompt_templates=prompt_templates
)
GradioUI(agent).launch()
```
**Mục tiêu** của bạn là làm quen với Space và Agent.
Hiện tại, Agent trong template **chưa dùng tool nào**, hãy thử cung cấp cho nó các tool có sẵn hoặc tự tạo tool mới!
Chúng mình rất mong chờ thành quả Agent tuyệt vời của bạn ở kênh discord **#agents-course-showcase**!
---
Chúc mừng bạn đã build xong Agent đầu tiên! Đừng ngại chia sẻ với bạn bè và đồng nghiệp nhé.
Vì đây là lần đầu thử nghiệm, việc Agent có lỗi nhỏ hoặc chạy chậm là hoàn toàn bình thường. Ở các chương sau, ta sẽ học cách xây dựng Agent tốt hơn.
Cách học tốt nhất là thực hành, nên đừng ngần ngại cập nhật nó, thêm tool mới, thử với mô hình khác, v.v.
Ở phần tiếp theo, bạn sẽ hoàn thành Bài kiểm tra cuối cùng và nhận chứng chỉ! | agents-course/units/vi/unit1/tutorial.mdx/0 | {
"file_path": "agents-course/units/vi/unit1/tutorial.mdx",
"repo_id": "agents-course",
"token_count": 5528
} | 19 |
# 后续单元发布时间表及常见问题解答
课程单元发布时间安排如下:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/next-units.jpg" alt="下一单元" width="100%"/>
请务必 <a href="https://bit.ly/hf-learn-agents">完成课程注册</a>! 完成注册后, **我们将随单元发布进度为您推送专属学习链接,同步更新挑战任务详情及课程动态**。
持续精进,成就卓越 🤗 | agents-course/units/zh-CN/communication/next-units.mdx/0 | {
"file_path": "agents-course/units/zh-CN/communication/next-units.mdx",
"repo_id": "agents-course",
"token_count": 290
} | 20 |
# 什么是工具?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-2.jpg" alt="Unit 1 planning"/>
AI 智能体的关键能力在于执行**行动**。正如前文所述,这通过**工具**的使用实现。
本节将学习工具的定义、有效设计方法,以及如何通过系统消息将其集成到智能体中。
通过为智能体配备合适的工具——并清晰描述这些工具的工作原理——可显著提升 AI 的能力边界。让我们深入探讨!
## AI 工具的定义
**工具是赋予 LLM 的函数**,该函数应实现**明确的目标**。
以下是 AI 智能体中常用的工具示例:
| 工具类型 | 描述 |
|------------------|---------------------------------------------------------------|
| 网络搜索 | 允许智能体从互联网获取最新信息 |
| 图像生成 | 根据文本描述生成图像 |
| 信息检索 | 从外部源检索信息 |
| API 接口 | 与外部 API 交互(GitHub、YouTube、Spotify 等) |
以上仅为示例,实际可为任何用例创建工具!
优秀工具应能**补充 LLM 的核心能力**。
例如,若需执行算术运算,为 LLM 提供**计算器工具**将比依赖模型原生能力获得更好结果。
此外,**LLM 基于训练数据预测提示的补全**,意味着其内部知识仅包含训练截止前的信息。因此,若智能体需要最新数据,必须通过工具获取。
例如,若直接询问 LLM(无搜索工具)今日天气,LLM 可能会产生随机幻觉。
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/weather.jpg" alt="Weather"/>
- 合格工具应包含:
- **函数功能的文本描述**
- *可调用对象*(执行操作的实体)
- 带类型声明的*参数*
- (可选)带类型声明的输出
## 工具如何运作?
正如前文所述,LLM 只能接收文本输入并生成文本输出。它们无法自行调用工具。当我们谈及_为智能体提供工具_时,实质是**教导** LLM 认识工具的存在,并要求模型在需要时生成调用工具的文本。例如,若我们提供从互联网获取某地天气的工具,当询问 LLM 巴黎天气时,LLM 将识别该问题适合使用我们教授的"天气"工具,并生成代码形式的文本来调用该工具。**智能体**负责解析 LLM 的输出,识别工具调用需求,并执行工具调用。工具的输出将返回给 LLM,由其生成最终用户响应。
工具调用的输出是对话中的另一种消息类型。工具调用步骤通常对用户不可见:智能体检索对话、调用工具、获取输出、将其作为新消息添加,并将更新后的对话再次发送给 LLM。从用户视角看,仿佛 LLM 直接使用了工具,但实际执行的是我们的应用代码(**智能体**)。
后续课程将深入探讨该流程。
## 如何为 LLM 提供工具?
完整答案可能看似复杂,但核心是通过系统提示(system prompt)向模型文本化描述可用工具:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt.png" alt="System prompt for tools"/>
为确保有效性,必须精准描述:
1. **工具功能**
2. **预期输入格式**
因此工具描述通常采用结构化表达方式(如编程语言或 JSON)。虽非强制,但任何精确、连贯的格式均可。
若觉抽象,我们通过具体示例理解。
我们将实现简化的**计算器**工具,仅执行两整数相乘。Python 实现如下:
```python
def calculator(a: int, b: int) -> int:
"""Multiply two integers."""
return a * b
```
因此我们的工具名为`calculator`,其功能是**将两个整数相乘**,需要以下输入:
- **`a`**(*int*):整数
- **`b`**(*int*):整数
工具输出为另一个整数,描述如下:
- (*int*):`a`与`b`的乘积
所有这些细节都至关重要。让我们将这些信息整合成 LLM 可理解的工具描述文本:
```text
工具名称: calculator,描述:将两个整数相乘。参数:a: int, b: int,输出:int
```
> **重要提示:** 此文本描述是*我们希望 LLM 了解的工具体系*。
当我们将上述字符串作为输入的一部分传递给 LLM 时,模型将识别其为工具,并知晓需要传递的输入参数及预期输出。
若需提供更多工具,必须保持格式一致性。此过程可能较为脆弱,容易遗漏某些细节。
是否有更好的方法?
### 自动化工具描述生成
我们的工具采用 Python 实现,其代码已包含所需全部信息:
- 功能描述性名称:`calculator`
- 详细说明(通过函数文档字符串实现):`将两个整数相乘`
- 输入参数及类型:函数明确要求两个`int`类型参数
- 输出类型
这正是人们使用编程语言的原因:表达力强、简洁且精确。
虽然可以将 Python 源代码作为工具规范提供给 LLM,但具体实现方式并不重要。关键在于工具名称、功能描述、输入参数和输出类型。
我们将利用 Python 的**自省特性**,通过源代码自动构建工具描述。只需确保工具实现满足:
1. 使用类型注解(Type Hints)
2. 编写文档字符串(Docstrings)
3. 采用合理的函数命名
完成这些之后,我们只需使用一个 Python 装饰器来指示`calculator`函数是一个工具:
```python
@tool
def calculator(a: int, b: int) -> int:
"""Multiply two integers."""
return a * b
print(calculator.to_string())
```
注意函数定义前的`@tool`装饰器。
通过我们即将看到的实现,可以利用装饰器提供的`to_string()`方法从源代码自动提取以下文本:
```text
工具名称: calculator,描述:将两个整数相乘。参数:a: int, b: int,输出:int
```
正如所见,这与我们之前手动编写的内容完全一致!
### 通用工具类实现
我们创建通用`Tool`类,可在需要时重复使用:
> **说明:** 此示例实现为虚构代码,但高度模拟了主流工具库的实际实现方式。
```python
class Tool:
"""
A class representing a reusable piece of code (Tool).
Attributes:
name (str): Name of the tool.
description (str): A textual description of what the tool does.
func (callable): The function this tool wraps.
arguments (list): A list of argument.
outputs (str or list): The return type(s) of the wrapped function.
"""
def __init__(self,
name: str,
description: str,
func: callable,
arguments: list,
outputs: str):
self.name = name
self.description = description
self.func = func
self.arguments = arguments
self.outputs = outputs
def to_string(self) -> str:
"""
Return a string representation of the tool,
including its name, description, arguments, and outputs.
"""
args_str = ", ".join([
f"{arg_name}: {arg_type}" for arg_name, arg_type in self.arguments
])
return (
f"Tool Name: {self.name},"
f" Description: {self.description},"
f" Arguments: {args_str},"
f" Outputs: {self.outputs}"
)
def __call__(self, *args, **kwargs):
"""
Invoke the underlying function (callable) with provided arguments.
"""
return self.func(*args, **kwargs)
```
虽然看似复杂,但逐步解析即可理解其工作机制。我们定义的**`Tool`**类包含以下核心要素:
- **`name`**(*str*):工具名称
- **`description`**(*str*):工具功能简述
- **`function`**(*callable*):工具执行的函数
- **`arguments`**(*list*):预期输入参数列表
- **`outputs`**(*str* 或 *list*):工具预期输出
- **`__call__()`**:调用工具实例时执行函数
- **`to_string()`**:将工具属性转换为文本描述
可通过如下代码创建工具实例:
```python
calculator_tool = Tool(
"calculator", # name
"Multiply two integers.", # description
calculator, # function to call
[("a", "int"), ("b", "int")], # inputs (names and types)
"int", # output
)
```
但我们可以利用 Python 的`inspect`模块自动提取这些信息!这正是`@tool`装饰器的实现原理。
> 若感兴趣,可展开以下内容查看装饰器具体实现:
<details>
<summary> decorator code</summary>
```python
def tool(func):
"""
A decorator that creates a Tool instance from the given function.
"""
# Get the function signature
signature = inspect.signature(func)
# Extract (param_name, param_annotation) pairs for inputs
arguments = []
for param in signature.parameters.values():
annotation_name = (
param.annotation.__name__
if hasattr(param.annotation, '__name__')
else str(param.annotation)
)
arguments.append((param.name, annotation_name))
# Determine the return annotation
return_annotation = signature.return_annotation
if return_annotation is inspect._empty:
outputs = "No return annotation"
else:
outputs = (
return_annotation.__name__
if hasattr(return_annotation, '__name__')
else str(return_annotation)
)
# Use the function's docstring as the description (default if None)
description = func.__doc__ or "No description provided."
# The function name becomes the Tool name
name = func.__name__
# Return a new Tool instance
return Tool(
name=name,
description=description,
func=func,
arguments=arguments,
outputs=outputs
)
```
</details>
简而言之,在应用此装饰器后,我们可以按如下方式实现工具:
```python
@tool
def calculator(a: int, b: int) -> int:
"""Multiply two integers."""
return a * b
print(calculator.to_string())
```
我们可以使用`Tool`类的`to_string`方法自动生成适合LLM使用的工具描述文本:
```text
工具名称: calculator,描述:将两个整数相乘。参数:a: int, b: int,输出:int
```
该描述将被**注入**系统提示。以本节初始示例为例,替换`tools_description`后的系统提示如下:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/Agent_system_prompt_tools.png" alt="System prompt for tools"/>
在[Actions](actions)章节,我们将深入探讨智能体如何**调用**刚创建的这个工具。
### 模型上下文协议(MCP):统一的工具接口
模型上下文协议(MCP)是一种开放式协议,它规范了应用程序向 LLM 提供工具的方式。
MCP 提供:
- 不断增加的预构建集成列表,您的 LLM 可以直接接入这些集成
- 在 LLM 提供商和供应商之间灵活切换的能力
- 在基础设施内保护数据安全的最佳实践
这意味着任何实施 MCP 的框架都可以利用协议中定义的工具,从而无需为每个框架重新实现相同的工具接口。
---
工具在增强AI智能体能力方面至关重要。
总结本节要点:
- *工具定义*:通过提供清晰的文本描述、输入参数、输出结果及可调用函数
- *工具本质*:赋予LLM额外能力的函数(如执行计算或访问外部数据)
- *工具必要性*:帮助智能体突破静态模型训练的局限,处理实时任务并执行专业操作
现在进入【智能体工作流】(agent-steps-and-structure)章节,您将看到智能体如何观察、思考与行动。这**整合了当前所学全部内容**,为创建功能完备的 AI 智能体奠定基础。
但在此之前,让我们先完成另一个简短测验!
| agents-course/units/zh-CN/unit1/tools.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit1/tools.mdx",
"repo_id": "agents-course",
"token_count": 7598
} | 21 |
# LlamaIndex 简介
欢迎来到本模块,您将学习如何使用 [LlamaIndex](https://www.llamaindex.ai/) 工具包构建基于大语言模型(LLM)的智能体。
LlamaIndex 是**通过索引和工作流在您的数据上创建 LLM 驱动智能体的完整工具包**。本课程我们将重点关注构建 LlamaIndex 智能体的三个核心部分:**组件**、**智能体与工具**以及**工作流**。

让我们解析这些核心要素及其对智能体的支持:
- **组件**是 LlamaIndex 中的基础构建模块,包括提示词、模型和数据库等。组件通常帮助 LlamaIndex 与其他工具和库进行集成。
- **工具**:提供特定功能的组件,如搜索、计算或访问外部服务。它们是支撑智能体执行任务的基础模块。
- **智能体**:能够自主使用工具并做出决策的独立组件,通过协调工具使用来实现复杂目标。
- **工作流**:按步骤处理逻辑的流程。智能体工作流是一种无需显式使用智能体即可构建智能行为的方式。
## LlamaIndex 的独特之处
虽然LlamaIndex与其他框架(如 smolagents)有相似之处,但具备以下关键优势:
- **清晰的工作流系统**。通过事件驱动和异步优先的语法,工作流帮助您逐步分解智能体的决策过程,实现逻辑的清晰组合与组织。
- **基于 LlamaParse 的高级文档解析** 专为 LlamaIndex 打造的文档解析工具,尽管是付费功能,但提供无缝集成体验。
- **丰富的即用组件** 凭借长期的技术积累,LlamaIndex 与众多框架兼容,提供大量经过验证的可靠组件(如 LLM、检索器、索引等)。
- **LlamaHub** 提供了数百个此类组件、智能体和工具的注册中心,方便您在 LlamaIndex 中直接使用。
这些概念在不同场景中都是创建实用智能体的必要元素。
后续章节我们将深入解析每个概念。
掌握这些知识后,我们将运用所学**通过 Alfred 智能体创建实际应用案例**!
准备好探索 LlamaIndex 的精彩世界了吗?让我们立即启程,**通过 LlamaHub 查找并安装所需集成吧!🚀** | agents-course/units/zh-CN/unit2/llama-index/introduction.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit2/llama-index/introduction.mdx",
"repo_id": "agents-course",
"token_count": 1549
} | 22 |
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_agents.ipynb"},
]} />
# 使用 smolagents 构建视觉智能体
<Tip warning={true}>
本节示例需要接入强大的视觉语言模型(VLM)。我们使用 GPT-4o API 进行了测试。
若需了解 smolagents 和 Hugging Face 支持的其他替代方案,请参考<a href="./why_use_smolagents">为什么选择smolagents</a>章节。
</Tip>
赋予智能体视觉能力对于超越文本处理的任务至关重要。网页浏览、文档理解等现实场景都需要解析丰富的视觉内容。smolagents 内置支持视觉语言模型(VLMs),使智能体能够有效处理图像信息。
假设韦恩庄园的管家 Alfred 需要核验派对嘉宾身份。考虑到他可能无法识别所有来宾,我们可以构建基于 VLM 的智能体,通过视觉信息检索来辅助身份验证决策。以下是具体实现:
## 初始执行阶段提供图像
<Tip>
配套代码可在<a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_agents.ipynb" target="_blank">Google Colab 笔记本</a>中查看。
</Tip>
该方法在智能体启动时通过 task_images 参数传入图像,智能体在执行过程中持续处理这些图像。
假设 Alfred 需要核验超级英雄身份,他已有历史派对嘉宾图像数据库。
当新访客到来时,智能体可通过图像比对进行准入决策。
当前场景中,Alfred 怀疑访客可能是小丑假扮的神奇女侠。我们需要构建身份验证系统:
```python
from PIL import Image
import requests
from io import BytesIO
image_urls = [
"https://upload.wikimedia.org/wikipedia/commons/e/e8/The_Joker_at_Wax_Museum_Plus.jpg", # 小丑图像
"https://upload.wikimedia.org/wikipedia/en/9/98/Joker_%28DC_Comics_character%29.jpg" # 小丑图像
]
images = []
for url in image_urls:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
}
response = requests.get(url,headers=headers)
image = Image.open(BytesIO(response.content)).convert("RGB")
images.append(image)
```
完成图像加载后,智能体将判断访客身份:究竟是超级英雄(Wonder Woman)还是反派角色(The Joker)。
```python
from smolagents import CodeAgent, OpenAIServerModel
model = OpenAIServerModel(model_id="gpt-4o")
# 实例化智能体
agent = CodeAgent(
tools=[],
model=model,
max_steps=20,
verbosity_level=2
)
response = agent.run(
"""
Describe the costume and makeup that the comic character in these photos is wearing and return the description.
Tell me if the guest is The Joker or Wonder Woman.
""",
images=images
)
```
以下是我的运行结果(实际输出可能因环境差异有所不同,正如前文所述):
```python
{
'Costume and Makeup - First Image': (
'Purple coat and a purple silk-like cravat or tie over a mustard-yellow shirt.',
'White face paint with exaggerated features, dark eyebrows, blue eye makeup, red lips forming a wide smile.'
),
'Costume and Makeup - Second Image': (
'Dark suit with a flower on the lapel, holding a playing card.',
'Pale skin, green hair, very red lips with an exaggerated grin.'
),
'Character Identity': 'This character resembles known depictions of The Joker from comic book media.'
}
```
在这种情况下,输出结果揭示了这个人正在冒充他人,因此我们可以阻止 The Joker 进入派对!
## 提供动态检索图像
<Tip>
您可以在 <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/vision_web_browser.py" target="_blank">这个 Python 文件</a> 中查看代码。
</Tip>
前面的方法具有很高的价值,并且有许多潜在的应用场景。然而,在客人不在数据库中的情况下,我们需要探索其他识别方式。一种可能的解决方案是从外部来源动态检索图像和信息,例如通过浏览网页获取详细信息。
在此方法中,图像是在执行过程中动态添加到智能体的记忆中的。我们知道,`smolagents` 中的智能体基于 `MultiStepAgent` 类,该类是 ReAct 框架的抽象。此类以结构化的周期运行,在不同阶段记录各种变量和知识:
1. **SystemPromptStep:** 存储系统提示。
2. **TaskStep:** 记录用户查询和提供的任何输入。
3. **ActionStep:** 捕获智能体操作和结果的日志。
这种结构化的方法使智能体能够动态地结合视觉信息,并对不断变化的任务做出适应性响应。以下是已经见过的图表,展示了动态工作流程过程以及不同步骤如何在智能体生命周期内集成。在浏览时,智能体可以截取屏幕截图并将其保存为 `ActionStep` 中的 `observation_images`。

现在我们理解了需求,让我们构建完整的示例。在这种情况下,Alfred 希望完全控制访客验证过程,因此浏览详情成为可行的解决方案。为了完成这个示例,我们需要为智能体提供一组新的工具。此外,我们将使用 Selenium 和 Helium,这些是浏览器自动化工具。这将使我们能够构建一个探索网络、搜索潜在访客详情并检索验证信息的智能体。让我们安装所需的工具:
```bash
pip install "smolagents[all]" helium selenium python-dotenv
```
我们需要一组专为浏览设计的智能体工具,例如“search_item_ctrl_f”、“go_back”和“close_popups”。这些工具允许智能体像浏览网页的人一样行事。
```python
@tool
def search_item_ctrl_f(text: str, nth_result: int = 1) -> str:
    """
    Searches for text on the current page via Ctrl + F and jumps to the nth occurrence.

    Args:
        text: The text to search for
        nth_result: Which occurrence to jump to (default: 1)

    Returns:
        A status string with the match count and which occurrence was focused.

    Raises:
        Exception: if fewer than `nth_result` matches exist on the page.
    """
    # XPath text() match: every element whose visible text contains `text`.
    # NOTE(review): `text` is interpolated into the XPath unescaped, so a
    # search string containing a single quote would break the expression.
    elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]")
    if nth_result > len(elements):
        raise Exception(f"Match n°{nth_result} not found (only {len(elements)} matches found)")
    result = f"Found {len(elements)} matches for '{text}'."
    # nth_result is 1-based; the elements list is 0-based.
    elem = elements[nth_result - 1]
    # Scroll the match into the viewport so a subsequent screenshot shows it.
    driver.execute_script("arguments[0].scrollIntoView(true);", elem)
    result += f"Focused on element {nth_result} of {len(elements)}"
    return result
@tool
def go_back() -> None:
    """Goes back to previous page."""
    # Delegates to browser history; `driver` is the module-level Selenium driver.
    driver.back()
@tool
def close_popups() -> str:
    """
    Closes any visible modal or pop-up on the page. Use this to dismiss pop-up windows! This does not work on cookie consent banners.
    """
    # Sends ESCAPE to the page, which dismisses most JavaScript modals.
    webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
```
我们还需要保存屏幕截图的功能,因为这是我们的 VLM 智能体完成任务时必不可少的一部分。此功能会捕获屏幕截图并将其保存在 `step_log.observations_images = [image.copy()]` 中,从而允许智能体在导航时动态存储和处理图像。
```python
def save_screenshot(step_log: ActionStep, agent: CodeAgent) -> None:
    """Capture a browser screenshot at the end of an agent step.

    Registered as a `step_callback` on the agent: it stores the screenshot
    in the current step's ``observations_images``, appends the current URL
    to the step's observations, and prunes screenshots from steps older
    than the previous one to keep the agent's memory lean.

    Args:
        step_log: The log entry for the step that just finished.
        agent: The running agent, used to access earlier step logs.
    """
    sleep(1.0)  # Let JavaScript animations finish before the capture
    driver = helium.get_driver()
    current_step = step_log.step_number
    if driver is not None:
        # BUG FIX: the original compared/mutated `step_log` (the callback
        # argument) inside this loop instead of the loop variable, so old
        # screenshots were never pruned per-step as intended.
        for previous_step in agent.logs:
            if isinstance(previous_step, ActionStep) and previous_step.step_number <= current_step - 2:
                previous_step.observations_images = None
        png_bytes = driver.get_screenshot_as_png()
        image = Image.open(BytesIO(png_bytes))
        print(f"Captured a browser screenshot: {image.size} pixels")
        step_log.observations_images = [image.copy()]  # Copy to ensure it persists, important!!
        # Update the step's observations with the current URL.
        # BUG FIX: the original read `step_logs.observations` (the leaked
        # loop variable) in the condition below.
        url_info = f"Current url: {driver.current_url}"
        step_log.observations = url_info if step_log.observations is None else step_log.observations + "\n" + url_info
    return
```
此函数作为 `step_callback` 传递给智能体,因为它在智能体执行的每一步结束时被触发。这使得智能体能够在整个过程中动态捕获和存储屏幕截图。
现在,我们可以生成用于浏览网页的视觉智能体,为其提供我们创建的工具,以及 `DuckDuckGoSearchTool` 以探索网页。此工具将帮助智能体根据视觉线索检索验证访客身份所需的信息。
```python
from smolagents import CodeAgent, OpenAIServerModel, DuckDuckGoSearchTool
model = OpenAIServerModel(model_id="gpt-4o")
agent = CodeAgent(
tools=[DuckDuckGoSearchTool(), go_back, close_popups, search_item_ctrl_f],
model=model,
additional_authorized_imports=["helium"],
step_callbacks=[save_screenshot],
max_steps=20,
verbosity_level=2,
)
```
有了这些,Alfred 准备检查访客的身份,并根据这些信息做出是否允许他们进入派对的明智决定:
```python
agent.run("""
I am Alfred, the butler of Wayne Manor, responsible for verifying the identity of guests at party. A superhero has arrived at the entrance claiming to be Wonder Woman, but I need to confirm if she is who she says she is.
Please search for images of Wonder Woman and generate a detailed visual description based on those images. Additionally, navigate to Wikipedia to gather key details about her appearance. With this information, I can determine whether to grant her access to the event.
""" + helium_instructions)
```
您可以看到,我们将 `helium_instructions` 作为任务的一部分包含在内。这个特殊的提示旨在控制智能体的导航,确保它在浏览网页时遵循正确的步骤。
让我们看看这在下面的视频中是如何工作的:
<iframe width="560" height="315" src="https://www.youtube.com/embed/rObJel7-OLc?si=TnNwQ8rqXqun_pqE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
这是最终输出:
```python
Final answer: Wonder Woman is typically depicted wearing a red and gold bustier, blue shorts or skirt with white stars, a golden tiara, silver bracelets, and a golden Lasso of Truth. She is Princess Diana of Themyscira, known as Diana Prince in the world of men.
```
通过这些步骤,我们成功地为派对创建了一个身份验证系统! Alfred 现在拥有必要的工具,可以确保只有正确的宾客能够进入庄园。一切准备就绪,可以享受在韦恩庄园的美好时光!
## 进一步阅读
- [我们让 smolagents 有了视觉能力](https://huggingface.co/blog/smolagents-can-see) - 博客文章描述了视觉智能体的功能。
- [使用智能体进行网页浏览 🤖🌐](https://huggingface.co/docs/smolagents/examples/web_browser) - 使用视觉智能体进行网页浏览的示例。
- [网页浏览视觉智能体示例](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py) - 使用视觉智能体进行网页浏览的示例。 | agents-course/units/zh-CN/unit2/smolagents/vision_agents.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit2/smolagents/vision_agents.mdx",
"repo_id": "agents-course",
"token_count": 6002
} | 23 |
# Introduction
{{#include ../../README.md:goals}}
{{#include ../../README.md:features}}
This book will introduce step by step how to use `candle`. | candle/candle-book/src/README.md/0 | {
"file_path": "candle/candle-book/src/README.md",
"repo_id": "candle",
"token_count": 49
} | 24 |
# Candle MNIST Tutorial
## Modeling
Open `src/main.rs` in your project folder and insert the following code:
```rust
use candle_core::{Device, Result, Tensor};
struct Model {
first: Tensor,
second: Tensor,
}
impl Model {
fn forward(&self, image: &Tensor) -> Result<Tensor> {
let x = image.matmul(&self.first)?;
let x = x.relu()?;
x.matmul(&self.second)
}
}
fn main() -> Result<()> {
// Use Device::new_cuda(0)?; to utilize GPU acceleration.
let device = Device::Cpu;
let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
let model = Model { first, second };
let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
let digit = model.forward(&dummy_image)?;
println!("Digit {digit:?} digit");
Ok(())
}
```
Execute the program with:
```bash
$ cargo run --release
> Digit Tensor[dims 1, 10; f32] digit
```
Since random inputs are provided, expect an incoherent output.
## Implementing a `Linear` Layer
To create a more sophisticated layer type, add a `bias` to the weight to construct the standard `Linear` layer.
Replace the entire content of `src/main.rs` with:
```rust
use candle_core::{Device, Result, Tensor};
struct Linear {
weight: Tensor,
bias: Tensor,
}
impl Linear {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.matmul(&self.weight)?;
x.broadcast_add(&self.bias)
}
}
struct Model {
first: Linear,
second: Linear,
}
impl Model {
fn forward(&self, image: &Tensor) -> Result<Tensor> {
let x = self.first.forward(image)?;
let x = x.relu()?;
self.second.forward(&x)
}
}
fn main() -> Result<()> {
// Use Device::new_cuda(0)?; for GPU acceleration.
// Use Device::Cpu; for CPU computation.
let device = Device::cuda_if_available(0)?;
// Initialize model parameters
let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
let first = Linear { weight, bias };
let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
let second = Linear { weight, bias };
let model = Model { first, second };
let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
// Perform inference
let digit = model.forward(&dummy_image)?;
println!("Digit {digit:?} digit");
Ok(())
}
```
Execute again with:
```bash
$ cargo run --release
> Digit Tensor[dims 1, 10; f32] digit
```
## Utilizing `candle_nn`
Many classical layers (such as [Linear](https://github.com/huggingface/candle/blob/main/candle-nn/src/linear.rs)) are already implemented in [candle-nn](https://github.com/huggingface/candle/tree/main/candle-nn).
This `Linear` implementation follows PyTorch conventions for improved compatibility with existing models, utilizing the transpose of weights rather than direct weights.
Let's simplify our implementation. First, add `candle-nn` as a dependency:
```bash
$ cargo add --git https://github.com/huggingface/candle.git candle-nn
```
Now, replace the entire content of `src/main.rs` with:
```rust
use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};
struct Model {
first: Linear,
second: Linear,
}
impl Model {
fn forward(&self, image: &Tensor) -> Result<Tensor> {
let x = self.first.forward(image)?;
let x = x.relu()?;
self.second.forward(&x)
}
}
fn main() -> Result<()> {
// Use Device::new_cuda(0)?; for GPU acceleration.
let device = Device::Cpu;
// Note the dimension change: (784, 100) -> (100, 784)
let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
let first = Linear::new(weight, Some(bias));
let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
let second = Linear::new(weight, Some(bias));
let model = Model { first, second };
let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;
let digit = model.forward(&dummy_image)?;
println!("Digit {digit:?} digit");
Ok(())
}
```
Execute the final version:
```bash
$ cargo run --release
> Digit Tensor[dims 1, 10; f32] digit
``` | candle/candle-book/src/guide/mnist/modeling.md/0 | {
"file_path": "candle/candle-book/src/guide/mnist/modeling.md",
"repo_id": "candle",
"token_count": 1740
} | 25 |
[package]
name = "candle-core"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
accelerate-src = { workspace = true, optional = true }
byteorder = { workspace = true }
candle-kernels = { workspace = true, optional = true }
candle-metal-kernels = { workspace = true, optional = true }
metal = { workspace = true, optional = true }
cudarc = { workspace = true, optional = true }
gemm = { workspace = true }
half = { workspace = true }
float8 = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
libc = { workspace = true, optional = true }
memmap2 = { workspace = true }
num-traits = { workspace = true }
num_cpus = { workspace = true }
rand = { workspace = true }
rand_distr = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
thiserror = { workspace = true }
ug-cuda = { workspace = true, optional = true }
ug-metal = { workspace = true, optional = true }
yoke = { workspace = true }
zip = { workspace = true }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ug = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
clap = { workspace = true }
criterion = { workspace = true }
[features]
default = []
cuda = ["cudarc", "dep:candle-kernels", "dep:ug-cuda", "float8/cuda"]
cudnn = ["cuda", "cudarc/cudnn"]
mkl = ["dep:libc", "dep:intel-mkl-src"]
accelerate = ["dep:libc", "dep:accelerate-src"]
metal = ["dep:metal", "dep:candle-metal-kernels", "dep:ug-metal"]
[[bench]]
name = "bench_main"
harness = false
[[example]]
name = "metal_basics"
required-features = ["metal"]
[[example]]
name = "cuda_basics"
required-features = ["cuda"]
| candle/candle-core/Cargo.toml/0 | {
"file_path": "candle/candle-core/Cargo.toml",
"repo_id": "candle",
"token_count": 604
} | 26 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::str::FromStr;
use anyhow::Result;
use candle_core::{Device, Tensor};
/// Builds an `(n, 6n)` tensor as the outer product of |cos(i/n)| column
/// values and |sin(i/n)| row values, with the sin row tiled six times.
fn cos_sin(n: usize, device: &Device) -> Result<Tensor> {
    let angles: Vec<f32> = (0..n).map(|i| i as f32 / n as f32).collect();
    let mut cos_vals = Vec::with_capacity(n);
    let mut sin_vals = Vec::with_capacity(n);
    for t in &angles {
        cos_vals.push(t.cos().abs());
        sin_vals.push(t.sin().abs());
    }
    let col = Tensor::from_vec(cos_vals, (n, 1), device)?;
    let row = Tensor::from_vec(sin_vals, (1, n), device)?;
    let row = Tensor::cat(&[&row, &row, &row, &row, &row, &row], 1)?;
    Ok(col.matmul(&row)?)
}
/// Benchmarks `sum_keepdim` reductions on a CUDA device, comparing a small
/// sanity result against the CPU backend first.
fn main() -> Result<()> {
    let device = Device::new_cuda(0)?;
    let args = std::env::args().collect::<Vec<String>>();
    // Matrix size taken from the first CLI argument, defaulting to 2000.
    let n = if args.len() < 2 {
        2000usize
    } else {
        usize::from_str(&args[1])?
    };
    // Build the same matrix on CPU and GPU so the reductions below can be
    // eyeballed against each other.
    let xys_cpu = cos_sin(n, &Device::Cpu)?;
    let xys = cos_sin(n, &device)?;
    println!("{xys_cpu:?} {xys:?}");
    let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?;
    println!("{sum_keepdim_cpu}");
    let sum_keepdim = xys.sum_keepdim(1)?;
    println!("{sum_keepdim}");
    // Benchmark loop: reduce over both dims, then copy the scalar back to
    // the host, which forces a device synchronization each iteration.
    let start = std::time::Instant::now();
    let n_iters = 100;
    let mut v = 0f32;
    for _i in 0..n_iters {
        let sum_keepdim = xys.sum_keepdim(1)?;
        let sum_keepdim = sum_keepdim.sum_keepdim(0)?;
        let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?;
        v += sum_keepdim;
    }
    let elapsed = start.elapsed();
    // Use `v` so the compiler cannot optimize the whole loop away.
    if v > 0. {
        println!(
            "ran {n_iters} iterations, time per iter: {:?} ({v})",
            elapsed.div_f64(n_iters as f64)
        );
    }
    Ok(())
}
| candle/candle-core/examples/cuda_sum_benchmark.rs/0 | {
"file_path": "candle/candle-core/examples/cuda_sum_benchmark.rs",
"repo_id": "candle",
"token_count": 827
} | 27 |
use crate::backend::BackendDevice;
use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape};
pub use candle_kernels as kernels;
pub use cudarc;
use cudarc::driver::CudaFunction;
use float8::F8E4M3;
use half::{bf16, f16};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use super::{CudaError, CudaStorage, CudaStorageSlice, WrapErr};
/// Unique identifier for cuda devices.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct DeviceId(usize);
impl DeviceId {
    /// Returns a fresh, process-unique identifier.
    fn new() -> Self {
        // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805
        // Relaxed ordering suffices: only uniqueness is needed, not any
        // ordering with respect to other memory operations.
        use std::sync::atomic;
        static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
        Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed))
    }
}
/// Newtype wrapper around the curand RNG handle.
// SAFETY(review): marking this `Send` assumes access is externally
// synchronized — in this file it is only ever stored behind a
// `Mutex` inside `CudaDevice`; confirm before using it elsewhere.
struct CudaRng(cudarc::curand::CudaRng);
unsafe impl Send for CudaRng {}
pub struct ModuleStore {
mdls: [Option<Arc<cudarc::driver::CudaModule>>; kernels::ALL_IDS.len()],
}
#[derive(Clone)]
pub struct CudaDevice {
id: DeviceId,
context: Arc<cudarc::driver::CudaContext>,
modules: Arc<std::sync::RwLock<ModuleStore>>,
custom_modules: Arc<std::sync::RwLock<HashMap<String, Arc<cudarc::driver::CudaModule>>>>,
stream: Arc<cudarc::driver::CudaStream>,
pub(crate) blas: Arc<cudarc::cublas::CudaBlas>,
curand: Arc<Mutex<CudaRng>>,
}
impl std::fmt::Debug for CudaDevice {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "CudaDevice({:?})", self.id)
}
}
impl CudaDevice {
    /// Allocates uninitialized device memory for `len` elements of `T`
    /// on this device's stream.
    ///
    /// # Safety
    ///
    /// The returned buffer is uninitialized and must be fully written
    /// before being read.
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn alloc<T: cudarc::driver::DeviceRepr>(
        &self,
        len: usize,
    ) -> Result<cudarc::driver::CudaSlice<T>> {
        self.stream.alloc::<T>(len).w()
    }

    /// Allocates zero-initialized device memory for `len` elements of `T`.
    pub fn alloc_zeros<T: cudarc::driver::DeviceRepr + cudarc::driver::ValidAsZeroBits>(
        &self,
        len: usize,
    ) -> Result<cudarc::driver::CudaSlice<T>> {
        self.stream.alloc_zeros::<T>(len).w()
    }

    /// Copies host memory `src` into the existing device buffer `dst`.
    pub fn memcpy_htod<
        T: cudarc::driver::DeviceRepr,
        Src: cudarc::driver::HostSlice<T> + ?Sized,
        Dst: cudarc::driver::DevicePtrMut<T>,
    >(
        &self,
        src: &Src,
        dst: &mut Dst,
    ) -> Result<()> {
        self.stream.memcpy_htod(src, dst).w()
    }

    /// Copies the device buffer `src` back to the host as a `Vec<T>`.
    pub fn memcpy_dtov<T: cudarc::driver::DeviceRepr, Src: cudarc::driver::DevicePtr<T>>(
        &self,
        src: &Src,
    ) -> Result<Vec<T>> {
        self.stream.memcpy_dtov(src).w()
    }

    /// Copies between two device buffers (device-to-device).
    pub fn memcpy_dtod<
        T,
        Src: cudarc::driver::DevicePtr<T>,
        Dst: cudarc::driver::DevicePtrMut<T>,
    >(
        &self,
        src: &Src,
        dst: &mut Dst,
    ) -> Result<()> {
        self.stream.memcpy_dtod(src, dst).w()
    }

    /// Allocates a new device buffer and copies host memory `src` into it.
    pub fn memcpy_stod<
        T: cudarc::driver::DeviceRepr,
        Src: cudarc::driver::HostSlice<T> + ?Sized,
    >(
        &self,
        src: &Src,
    ) -> Result<cudarc::driver::CudaSlice<T>> {
        self.stream.memcpy_stod(src).w()
    }
}
pub struct CudaFunc {
func: CudaFunction,
stream: Arc<cudarc::driver::CudaStream>,
}
impl std::ops::Deref for CudaFunc {
type Target = CudaFunction;
fn deref(&self) -> &Self::Target {
&self.func
}
}
impl CudaFunc {
pub fn into_cuda_function(self) -> CudaFunction {
self.func
}
}
/// Pushes each expression as a kernel-launch argument on builder `$b`.
/// Each value is first bound to a local (`__arg`) so that the reference
/// passed to `arg` outlives the call.
#[macro_export]
macro_rules! builder_arg {
    ($b:ident, $($arg:expr),*) => {
        $(
            let __arg = $arg;
            $b.arg(&__arg);
        )*
    };
}
impl CudaFunc {
pub fn builder(&self) -> cudarc::driver::LaunchArgs<'_> {
self.stream.launch_builder(&self.func)
}
}
impl CudaDevice {
pub fn cuda_stream(&self) -> Arc<cudarc::driver::CudaStream> {
self.stream.clone()
}
/// When turned on, all cuda tensors **created after calling this function** will
/// not track uses via cuda events.
///
/// # Safety
///
/// It is up to the user to ensure proper synchronization between multiple streams:
/// - Ensure that no tensor is freed before a use on another stream is finished.
/// - Ensure that a tensor is not used on another stream before allocation on the
/// allocating stream finishes.
/// - Ensure that a tensor is not written two concurrently by multiple streams.
pub unsafe fn disable_event_tracking(&self) {
self.context.disable_event_tracking()
}
pub fn is_event_tracking(&self) -> bool {
self.context.is_event_tracking()
}
#[cfg(not(target_arch = "wasm32"))]
pub fn compile(
&self,
func_name: &'static str,
kernel: ug::lang::ssa::Kernel,
) -> Result<CudaFunc> {
let mut buf = vec![];
ug_cuda::code_gen::gen(&mut buf, func_name, &kernel)?;
let cuda_code = String::from_utf8(buf)?;
let opts = cudarc::nvrtc::CompileOptions {
use_fast_math: Some(true),
..Default::default()
};
let ptx = cudarc::nvrtc::safe::compile_ptx_with_opts(cuda_code, opts).w()?;
let module = self.context.load_module(ptx).w()?;
let func = module.load_function(func_name).w()?;
Ok(CudaFunc {
func,
stream: self.stream.clone(),
})
}
pub fn id(&self) -> DeviceId {
self.id
}
pub fn get_or_load_custom_func(
&self,
fn_name: &str,
module_name: &str,
ptx: &str,
) -> Result<CudaFunc> {
let ms = self.custom_modules.read().unwrap();
if let Some(mdl) = ms.get(module_name).as_ref() {
let func = mdl.load_function(fn_name).w()?;
return Ok(CudaFunc {
func,
stream: self.stream.clone(),
});
}
drop(ms);
let mut ms = self.custom_modules.write().unwrap();
let cuda_module = self.context.load_module(ptx.into()).w()?;
ms.insert(module_name.to_string(), cuda_module.clone());
let func = cuda_module.load_function(fn_name).w()?;
Ok(CudaFunc {
func,
stream: self.stream.clone(),
})
}
pub fn get_or_load_func(&self, fn_name: &str, mdl: &kernels::Module) -> Result<CudaFunc> {
let ms = self.modules.read().unwrap();
if let Some(mdl) = ms.mdls[mdl.index()].as_ref() {
let func = mdl.load_function(fn_name).w()?;
return Ok(CudaFunc {
func,
stream: self.stream.clone(),
});
}
drop(ms);
let mut ms = self.modules.write().unwrap();
let cuda_module = self.context.load_module(mdl.ptx().into()).w()?;
ms.mdls[mdl.index()] = Some(cuda_module.clone());
let func = cuda_module.load_function(fn_name).w()?;
Ok(CudaFunc {
func,
stream: self.stream.clone(),
})
}
}
impl CudaDevice {
pub fn new_with_stream(ordinal: usize) -> Result<Self> {
let context = cudarc::driver::CudaContext::new(ordinal).w()?;
let stream = context.new_stream().w()?;
let blas = cudarc::cublas::CudaBlas::new(stream.clone()).w()?;
let curand = cudarc::curand::CudaRng::new(299792458, stream.clone()).w()?;
let module_store = ModuleStore {
mdls: [const { None }; kernels::ALL_IDS.len()],
};
Ok(Self {
id: DeviceId::new(),
context,
stream,
blas: Arc::new(blas),
curand: Arc::new(Mutex::new(CudaRng(curand))),
modules: Arc::new(std::sync::RwLock::new(module_store)),
custom_modules: Arc::new(std::sync::RwLock::new(HashMap::new())),
})
}
}
impl BackendDevice for CudaDevice {
type Storage = CudaStorage;
fn new(ordinal: usize) -> Result<Self> {
let context = cudarc::driver::CudaContext::new(ordinal).w()?;
let stream = context.default_stream();
let blas = cudarc::cublas::CudaBlas::new(stream.clone()).w()?;
let curand = cudarc::curand::CudaRng::new(299792458, stream.clone()).w()?;
let module_store = ModuleStore {
mdls: [const { None }; kernels::ALL_IDS.len()],
};
Ok(Self {
id: DeviceId::new(),
context,
stream,
blas: Arc::new(blas),
curand: Arc::new(Mutex::new(CudaRng(curand))),
modules: Arc::new(std::sync::RwLock::new(module_store)),
custom_modules: Arc::new(std::sync::RwLock::new(HashMap::new())),
})
}
fn set_seed(&self, seed: u64) -> Result<()> {
// We do not call set_seed but instead create a new curand object. This ensures that the
// state will be identical and the same random numbers will be generated.
let mut curand = self.curand.lock().unwrap();
curand.0 = cudarc::curand::CudaRng::new(seed, self.stream.clone()).w()?;
Ok(())
}
fn location(&self) -> crate::DeviceLocation {
crate::DeviceLocation::Cuda {
gpu_id: self.context.ordinal(),
}
}
fn same_device(&self, rhs: &Self) -> bool {
self.id == rhs.id
}
fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> {
let elem_count = shape.elem_count();
let slice = match dtype {
DType::U8 => {
let data = self.alloc_zeros::<u8>(elem_count)?;
CudaStorageSlice::U8(data)
}
DType::U32 => {
let data = self.alloc_zeros::<u32>(elem_count)?;
CudaStorageSlice::U32(data)
}
DType::I64 => {
let data = self.alloc_zeros::<i64>(elem_count)?;
CudaStorageSlice::I64(data)
}
DType::BF16 => {
let data = self.alloc_zeros::<bf16>(elem_count)?;
CudaStorageSlice::BF16(data)
}
DType::F16 => {
let data = self.alloc_zeros::<f16>(elem_count)?;
CudaStorageSlice::F16(data)
}
DType::F32 => {
let data = self.alloc_zeros::<f32>(elem_count)?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let data = self.alloc_zeros::<f64>(elem_count)?;
CudaStorageSlice::F64(data)
}
DType::F8E4M3 => {
let data = self.alloc_zeros::<F8E4M3>(elem_count)?;
CudaStorageSlice::F8E4M3(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn rand_uniform(&self, shape: &Shape, dtype: DType, lo: f64, up: f64) -> Result<CudaStorage> {
let elem_count = shape.elem_count();
let curand = self.curand.lock().unwrap();
let slice = match dtype {
// TODO: Add support for F16 and BF16 though this is likely to require some upstream
// cudarc changes.
DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 | DType::F8E4M3 => {
Err(CudaError::UnsupportedDtype {
dtype,
op: "rand_uniform",
})
.w()?
}
DType::F32 => {
let mut data = unsafe { self.alloc::<f32>(elem_count)? };
curand.0.fill_with_uniform(&mut data).w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let mut data = unsafe { self.alloc::<f64>(elem_count)? };
curand.0.fill_with_uniform(&mut data).w()?;
CudaStorageSlice::F64(data)
}
};
let slice = if lo == 0. && up == 1.0 {
slice
} else {
use super::utils::Map1;
let layout = Layout::contiguous(shape);
super::Affine(up - lo, lo).map(&slice, self, &layout)?
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CudaStorage> {
// TODO: Add support for F16 and BF16 though this is likely to require some upstream
// cudarc changes.
let elem_count = shape.elem_count();
let curand = self.curand.lock().unwrap();
// curand can only generate an odd number of values.
// https://github.com/huggingface/candle/issues/734
let elem_count_round = if elem_count % 2 == 1 {
elem_count + 1
} else {
elem_count
};
let slice = match dtype {
DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 | DType::F8E4M3 => {
Err(CudaError::UnsupportedDtype {
dtype,
op: "rand_normal",
})
.w()?
}
DType::F32 => {
let mut data = unsafe { self.alloc::<f32>(elem_count_round)? };
curand
.0
.fill_with_normal(&mut data, mean as f32, std as f32)
.w()?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let mut data = unsafe { self.alloc::<f64>(elem_count_round)? };
curand.0.fill_with_normal(&mut data, mean, std).w()?;
CudaStorageSlice::F64(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Self::Storage> {
let elem_count = shape.elem_count();
let slice = match dtype {
DType::U8 => {
let data = self.alloc::<u8>(elem_count)?;
CudaStorageSlice::U8(data)
}
DType::U32 => {
let data = self.alloc::<u32>(elem_count)?;
CudaStorageSlice::U32(data)
}
DType::I64 => {
let data = self.alloc::<i64>(elem_count)?;
CudaStorageSlice::I64(data)
}
DType::BF16 => {
let data = self.alloc::<bf16>(elem_count)?;
CudaStorageSlice::BF16(data)
}
DType::F16 => {
let data = self.alloc::<f16>(elem_count)?;
CudaStorageSlice::F16(data)
}
DType::F32 => {
let data = self.alloc::<f32>(elem_count)?;
CudaStorageSlice::F32(data)
}
DType::F64 => {
let data = self.alloc::<f64>(elem_count)?;
CudaStorageSlice::F64(data)
}
DType::F8E4M3 => {
let data = self.alloc::<F8E4M3>(elem_count)?;
CudaStorageSlice::F8E4M3(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> {
let slice = match T::cpu_storage_ref(s) {
CpuStorageRef::U8(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::U8(data)
}
CpuStorageRef::U32(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::U32(data)
}
CpuStorageRef::I64(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::I64(data)
}
CpuStorageRef::BF16(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::BF16(data)
}
CpuStorageRef::F16(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F16(data)
}
CpuStorageRef::F32(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F32(data)
}
CpuStorageRef::F64(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F64(data)
}
CpuStorageRef::F8E4M3(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F8E4M3(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<CudaStorage> {
let slice = match storage {
CpuStorage::U8(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::U8(data)
}
CpuStorage::U32(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::U32(data)
}
CpuStorage::I64(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::I64(data)
}
CpuStorage::BF16(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::BF16(data)
}
CpuStorage::F16(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F16(data)
}
CpuStorage::F32(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F32(data)
}
CpuStorage::F64(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F64(data)
}
CpuStorage::F8E4M3(storage) => {
let data = self.memcpy_stod(storage)?;
CudaStorageSlice::F8E4M3(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<CudaStorage> {
let slice = match storage {
CpuStorage::U8(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::U8(data)
}
CpuStorage::U32(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::U32(data)
}
CpuStorage::I64(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::I64(data)
}
CpuStorage::BF16(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::BF16(data)
}
CpuStorage::F16(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::F16(data)
}
CpuStorage::F32(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::F32(data)
}
CpuStorage::F64(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::F64(data)
}
CpuStorage::F8E4M3(storage) => {
let data = self.memcpy_stod(&storage)?;
CudaStorageSlice::F8E4M3(data)
}
};
Ok(CudaStorage {
slice,
device: self.clone(),
})
}
fn synchronize(&self) -> Result<()> {
self.stream.synchronize().map_err(crate::Error::wrap)?;
Ok(())
}
}
| candle/candle-core/src/cuda_backend/device.rs/0 | {
"file_path": "candle/candle-core/src/cuda_backend/device.rs",
"repo_id": "candle",
"token_count": 10496
} | 28 |
#![allow(dead_code)]
use libc::{c_char, c_double, c_float, c_int};
mod ffi {
use super::*;
extern "C" {
pub fn vsTanh(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdTanh(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsExp(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdExp(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsLn(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdLn(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsSin(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdSin(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsCos(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdCos(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsSqrt(n: c_int, a: *const c_float, y: *mut c_float);
pub fn vdSqrt(n: c_int, a: *const c_double, y: *mut c_double);
pub fn vsAdd(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdAdd(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsSub(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdSub(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsMul(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdMul(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsDiv(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdDiv(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsFmax(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdFmax(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn vsFmin(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float);
pub fn vdFmin(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double);
pub fn sgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_float,
a: *const c_float,
lda: *const c_int,
b: *const c_float,
ldb: *const c_int,
beta: *const c_float,
c: *mut c_float,
ldc: *const c_int,
);
pub fn dgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_double,
a: *const c_double,
lda: *const c_int,
b: *const c_double,
ldb: *const c_int,
beta: *const c_double,
c: *mut c_double,
ldc: *const c_int,
);
pub fn hgemm_(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const half::f16,
a: *const half::f16,
lda: *const c_int,
b: *const half::f16,
ldb: *const c_int,
beta: *const half::f16,
c: *mut half::f16,
ldc: *const c_int,
);
}
}
/// Single-precision GEMM via the Fortran BLAS `sgemm_` routine
/// (column-major layout): `c = alpha * op(a) * op(b) + beta * c`.
///
/// # Safety
///
/// `a`, `b` and `c` must be large enough for the given dimensions and
/// leading strides (`lda`, `ldb`, `ldc`), and `transa`/`transb` must be
/// valid BLAS transpose flags (e.g. `b'N'`, `b'T'`, `b'C'`).
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn sgemm(
    transa: u8,
    transb: u8,
    m: i32,
    n: i32,
    k: i32,
    alpha: f32,
    a: &[f32],
    lda: i32,
    b: &[f32],
    ldb: i32,
    beta: f32,
    c: &mut [f32],
    ldc: i32,
) {
    ffi::sgemm_(
        &(transa as c_char),
        &(transb as c_char),
        &m,
        &n,
        &k,
        &alpha,
        a.as_ptr(),
        &lda,
        b.as_ptr(),
        &ldb,
        &beta,
        c.as_mut_ptr(),
        &ldc,
    )
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn dgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: f64,
a: &[f64],
lda: i32,
b: &[f64],
ldb: i32,
beta: f64,
c: &mut [f64],
ldc: i32,
) {
ffi::dgemm_(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn hgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: half::f16,
a: &[half::f16],
lda: i32,
b: &[half::f16],
ldb: i32,
beta: half::f16,
c: &mut [half::f16],
ldc: i32,
) {
ffi::hgemm_(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
/// Elementwise exponential via MKL `vsExp`: `y[i] = exp(a[i])`.
///
/// Panics when the two slices differ in length.
#[inline]
pub fn vs_exp(a: &[f32], y: &mut [f32]) {
    let (a_len, y_len) = (a.len(), y.len());
    if a_len != y_len {
        panic!("a and y have different lengths {a_len} <> {y_len}")
    }
    unsafe { ffi::vsExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_exp(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_ln(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_ln(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sin(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sin(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_cos(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_cos(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sqrt(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sqrt(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_sqr(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_sqr(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_tanh(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vsTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_tanh(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vdTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) }
}
// The vector functions from mkl can be performed in place by using the same array for input and
// output.
// https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/vector-mathematical-functions.html
#[inline]
pub fn vs_tanh_inplace(y: &mut [f32]) {
unsafe { ffi::vsTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_tanh_inplace(y: &mut [f64]) {
unsafe { ffi::vdTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vs_exp_inplace(y: &mut [f32]) {
unsafe { ffi::vsExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
#[inline]
pub fn vd_exp_inplace(y: &mut [f64]) {
unsafe { ffi::vdExp(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) }
}
/// Tanh-approximated GELU over `f32` slices:
/// `y = 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x^2)))`.
/// The inner polynomial is written into `ys`, the tanh is applied in place,
/// then the final scaling reads `vs` again — so only `ys` is mutated.
/// NOTE(review): unlike `vs_exp` & co. there is no length check here; `zip`
/// silently stops at the shorter slice — confirm callers guarantee equal lengths.
#[inline]
pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) {
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
    }
    vs_tanh_inplace(ys);
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = 0.5 * v * (1.0 + *y)
    }
}
/// `f64` counterpart of [`vs_gelu`] (tanh-approximated GELU), with the same
/// two-pass structure and the same implicit assumption of equal slice lengths.
#[inline]
pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) {
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
    }
    vd_tanh_inplace(ys);
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = 0.5 * v * (1.0 + *y)
    }
}
/// SiLU (a.k.a. swish) over `f32` slices: `y = x * sigmoid(x) = x / (1 + exp(-x))`.
/// Implemented as: write `-x` into `ys`, exponentiate in place, then combine
/// with the original input. As with `vs_gelu`, slice lengths are not checked.
#[inline]
pub fn vs_silu(vs: &[f32], ys: &mut [f32]) {
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = -v
    }
    vs_exp_inplace(ys);
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = v / (1.0 + *y)
    }
}
/// `f64` counterpart of [`vs_silu`]: `y = x / (1 + exp(-x))`, computed via an
/// in-place exponential of the negated input.
#[inline]
pub fn vd_silu(vs: &[f64], ys: &mut [f64]) {
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = -v
    }
    vd_exp_inplace(ys);
    for (&v, y) in vs.iter().zip(ys.iter_mut()) {
        *y = v / (1.0 + *y)
    }
}
// Generates a safe wrapper around an MKL element-wise binary vector function
// `y[i] = f(a[i], b[i])`, panicking when any of the three slices differ in
// length before handing the raw pointers to the FFI call.
macro_rules! binary_op {
    ($fn_name:ident, $ty:ty, $mkl_name:ident) => {
        #[inline]
        pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) {
            let a_len = a.len();
            let b_len = b.len();
            let y_len = y.len();
            if a_len != y_len || b_len != y_len {
                panic!(
                    "{} a,b,y len mismatch {a_len} {b_len} {y_len}",
                    stringify!($fn_name)
                );
            }
            unsafe { ffi::$mkl_name(a_len as i32, a.as_ptr(), b.as_ptr(), y.as_mut_ptr()) }
        }
    };
}
// Instantiate the f32 (`vs_*`) and f64 (`vd_*`) element-wise binary kernels.
binary_op!(vs_add, f32, vsAdd);
binary_op!(vd_add, f64, vdAdd);
binary_op!(vs_sub, f32, vsSub);
binary_op!(vd_sub, f64, vdSub);
binary_op!(vs_mul, f32, vsMul);
binary_op!(vd_mul, f64, vdMul);
binary_op!(vs_div, f32, vsDiv);
binary_op!(vd_div, f64, vdDiv);
binary_op!(vs_max, f32, vsFmax);
binary_op!(vd_max, f64, vdFmax);
binary_op!(vs_min, f32, vsFmin);
binary_op!(vd_min, f64, vdFmin);
| candle/candle-core/src/mkl.rs/0 | {
"file_path": "candle/candle-core/src/mkl.rs",
"repo_id": "candle",
"token_count": 6463
} | 29 |
//! Module to load `safetensor` files into CPU/GPU memory.
//!
//! There are multiple ways to load tensors from safetensor files:
//! - `load` function for loading directly into memory and returning a HashMap of tensors
//! - `MmapedSafetensors` for memory mapping files and avoiding full allocation
//! - `SliceSafetensors` for working with in-memory buffers
//! - `BufferedSafetensors` for owning a buffer of data
//!
//! Tensors can also be serialized to safetensor format using the `save` function or
//! `Tensor::save_safetensors` method.
//!
use crate::{DType, Device, Error, Result, Tensor, WithDType};
use float8::F8E4M3;
use safetensors::tensor as st;
use safetensors::tensor::SafeTensors;
use std::borrow::Cow;
use std::collections::HashMap;
use std::path::Path;
/// Maps candle dtypes to their safetensors equivalents. Every candle dtype
/// has a direct safetensors counterpart, so this conversion is infallible.
impl From<DType> for st::Dtype {
    fn from(value: DType) -> Self {
        match value {
            DType::U8 => st::Dtype::U8,
            DType::U32 => st::Dtype::U32,
            DType::I64 => st::Dtype::I64,
            DType::BF16 => st::Dtype::BF16,
            DType::F16 => st::Dtype::F16,
            DType::F32 => st::Dtype::F32,
            DType::F64 => st::Dtype::F64,
            DType::F8E4M3 => st::Dtype::F8_E4M3,
        }
    }
}
/// Maps safetensors dtypes back to candle dtypes. The safetensors format
/// supports more dtypes than candle does natively, so dtypes without a
/// candle counterpart yield `Error::UnsupportedSafeTensorDtype`.
impl TryFrom<st::Dtype> for DType {
    type Error = Error;
    fn try_from(value: st::Dtype) -> Result<Self> {
        match value {
            st::Dtype::U8 => Ok(DType::U8),
            st::Dtype::U32 => Ok(DType::U32),
            st::Dtype::I64 => Ok(DType::I64),
            st::Dtype::BF16 => Ok(DType::BF16),
            st::Dtype::F16 => Ok(DType::F16),
            st::Dtype::F32 => Ok(DType::F32),
            st::Dtype::F64 => Ok(DType::F64),
            st::Dtype::F8_E4M3 => Ok(DType::F8E4M3),
            dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)),
        }
    }
}
/// Lets a [`Tensor`] be serialized directly by the safetensors crate.
impl st::View for Tensor {
    fn dtype(&self) -> st::Dtype {
        self.dtype().into()
    }
    fn shape(&self) -> &[usize] {
        self.shape().dims()
    }
    fn data(&self) -> Cow<'_, [u8]> {
        // This copies data from GPU to CPU.
        // TODO: Avoid the unwrap here.
        Cow::Owned(convert_back(self).unwrap())
    }
    fn data_len(&self) -> usize {
        // Computed from shape/dtype so the length is available without
        // triggering the device-to-host copy done by `data()`.
        let n: usize = self.shape().elem_count();
        let bytes_per_element = self.dtype().size_in_bytes();
        n * bytes_per_element
    }
}
/// Same as the `st::View` impl for [`Tensor`], but for references so tensors
/// can be serialized without being moved or cloned.
impl st::View for &Tensor {
    fn dtype(&self) -> st::Dtype {
        (*self).dtype().into()
    }
    fn shape(&self) -> &[usize] {
        self.dims()
    }
    fn data(&self) -> Cow<'_, [u8]> {
        // This copies data from GPU to CPU.
        // TODO: Avoid the unwrap here.
        Cow::Owned(convert_back(self).unwrap())
    }
    fn data_len(&self) -> usize {
        // Computed from shape/dtype; no device-to-host copy needed.
        let n: usize = self.dims().iter().product();
        let bytes_per_element = (*self).dtype().size_in_bytes();
        n * bytes_per_element
    }
}
impl Tensor {
    /// Saves this single tensor under the key `name` in a safetensors file at
    /// `filename`, overwriting any existing file.
    pub fn save_safetensors<P: AsRef<Path>>(&self, name: &str, filename: P) -> Result<()> {
        let data = [(name, self.clone())];
        Ok(st::serialize_to_file(data, &None, filename.as_ref())?)
    }
}
/// Reinterprets a raw byte slice as a slice of `T` and builds a tensor from it.
///
/// Fast path: when the byte pointer happens to be suitably aligned for `T`,
/// the data is viewed in place with no intermediate copy. Otherwise the bytes
/// are first copied into a freshly allocated (and therefore aligned) `Vec<T>`.
/// Trailing bytes that do not form a full element are ignored.
fn convert_slice<T: WithDType>(data: &[u8], shape: &[usize], device: &Device) -> Result<Tensor> {
    let size_in_bytes = T::DTYPE.size_in_bytes();
    let elem_count = data.len() / size_in_bytes;
    if (data.as_ptr() as usize) % size_in_bytes == 0 {
        // SAFETY This is safe because we just checked that this
        // was correctly aligned.
        let data: &[T] =
            unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) };
        Tensor::from_slice(data, shape, device)
    } else {
        // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast
        // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access
        let mut c: Vec<T> = Vec::with_capacity(elem_count);
        // SAFETY: We just created c, so the allocated memory is necessarily
        // contiguous and non overlapping with the view's data.
        // We're downgrading the `c` pointer from T to u8, which removes alignment
        // constraints.
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len());
            c.set_len(elem_count)
        }
        Tensor::from_slice(&c, shape, device)
    }
}
/// Like [`convert_slice`], but additionally maps every element through `conv`
/// (e.g. widening `i8` to `i64`) before building the tensor. Used for
/// safetensors dtypes that have no direct candle equivalent.
fn convert_slice_with_cast<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>(
    data: &[u8],
    shape: &[usize],
    device: &Device,
    conv: F,
) -> Result<Tensor> {
    let size_in_bytes = std::mem::size_of::<T>();
    let elem_count = data.len() / size_in_bytes;
    if (data.as_ptr() as usize) % size_in_bytes == 0 {
        // SAFETY This is safe because we just checked that this
        // was correctly aligned.
        let data: &[T] =
            unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) };
        let data = data.iter().map(|t| conv(*t)).collect::<Result<Vec<_>>>()?;
        Tensor::from_vec(data, shape, device)
    } else {
        // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast
        // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access
        let mut c: Vec<T> = Vec::with_capacity(elem_count);
        // SAFETY: We just created c, so the allocated memory is necessarily
        // contiguous and non overlapping with the view's data.
        // We're downgrading the `c` pointer from T to u8, which removes alignment
        // constraints.
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len());
            c.set_len(elem_count)
        }
        let c = c.into_iter().map(conv).collect::<Result<Vec<_>>>()?;
        Tensor::from_vec(c, shape, device)
    }
}
/// Convenience wrapper: applies [`convert_slice_with_cast`] to a safetensors
/// view's raw data and shape.
fn convert_with_cast_<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>(
    view: &st::TensorView<'_>,
    device: &Device,
    conv: F,
) -> Result<Tensor> {
    convert_slice_with_cast::<T, U, F>(view.data(), view.shape(), device, conv)
}
/// Convenience wrapper: applies [`convert_slice`] to a safetensors view's raw
/// data and shape (no element cast needed).
fn convert_<T: WithDType>(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> {
    convert_slice::<T>(view.data(), view.shape(), device)
}
/// Reinterprets an owned `Vec<T>` as a `Vec<u8>` without copying the data.
/// Length and capacity are rescaled to byte units and the original vector's
/// destructor is suppressed so ownership of the allocation transfers cleanly.
fn convert_back_<T: WithDType>(mut vs: Vec<T>) -> Vec<u8> {
    let size_in_bytes = T::DTYPE.size_in_bytes();
    let length = vs.len() * size_in_bytes;
    let capacity = vs.capacity() * size_in_bytes;
    let ptr = vs.as_mut_ptr() as *mut u8;
    // Don't run the destructor for Vec<T>
    std::mem::forget(vs);
    // SAFETY:
    //
    // Every T is larger than u8, so there is no issue regarding alignment.
    // This re-interpret the Vec<T> as a Vec<u8>.
    unsafe { Vec::from_raw_parts(ptr, length, capacity) }
}
/// Anything that can be materialized as a [`Tensor`] on a given device.
pub trait Load {
    fn load(&self, device: &Device) -> Result<Tensor>;
}
/// Safetensors views load by converting their raw bytes via [`convert`].
impl Load for st::TensorView<'_> {
    fn load(&self, device: &Device) -> Result<Tensor> {
        convert(self, device)
    }
}
impl Tensor {
    /// Builds a tensor from a raw little-endian byte buffer holding elements
    /// of the given candle `dtype`, copying only if the buffer is misaligned.
    pub fn from_raw_buffer(
        data: &[u8],
        dtype: DType,
        shape: &[usize],
        device: &Device,
    ) -> Result<Self> {
        match dtype {
            DType::U8 => convert_slice::<u8>(data, shape, device),
            DType::U32 => convert_slice::<u32>(data, shape, device),
            DType::I64 => convert_slice::<i64>(data, shape, device),
            DType::BF16 => convert_slice::<half::bf16>(data, shape, device),
            DType::F16 => convert_slice::<half::f16>(data, shape, device),
            DType::F32 => convert_slice::<f32>(data, shape, device),
            DType::F64 => convert_slice::<f64>(data, shape, device),
            DType::F8E4M3 => convert_slice::<F8E4M3>(data, shape, device),
        }
    }
}
fn convert(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> {
match view.dtype() {
st::Dtype::I8 => {
let conv = |x| Ok(i64::from(x));
convert_with_cast_::<i8, i64, _>(view, device, conv)
}
st::Dtype::U8 => convert_::<u8>(view, device),
st::Dtype::U16 => {
let conv = |x| Ok(u32::from(x));
convert_with_cast_::<u16, u32, _>(view, device, conv)
}
st::Dtype::U32 => convert_::<u32>(view, device),
st::Dtype::I32 => {
let conv = |x| Ok(i64::from(x));
convert_with_cast_::<i32, i64, _>(view, device, conv)
}
st::Dtype::I64 => convert_::<i64>(view, device),
st::Dtype::BF16 => convert_::<half::bf16>(view, device),
st::Dtype::F16 => convert_::<half::f16>(view, device),
st::Dtype::F32 => convert_::<f32>(view, device),
st::Dtype::F64 => convert_::<f64>(view, device),
dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)),
}
}
/// Serializes a tensor's contents to a contiguous little-endian byte vector,
/// copying from the device to the host as needed.
fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> {
    // TODO: This makes an unnecessary copy when the tensor is on the cpu.
    let tensor = tensor.flatten_all()?;
    match tensor.dtype() {
        DType::U8 => Ok(convert_back_::<u8>(tensor.to_vec1()?)),
        DType::U32 => Ok(convert_back_::<u32>(tensor.to_vec1()?)),
        DType::I64 => Ok(convert_back_::<i64>(tensor.to_vec1()?)),
        DType::F16 => Ok(convert_back_::<half::f16>(tensor.to_vec1()?)),
        DType::BF16 => Ok(convert_back_::<half::bf16>(tensor.to_vec1()?)),
        DType::F32 => Ok(convert_back_::<f32>(tensor.to_vec1()?)),
        DType::F64 => Ok(convert_back_::<f64>(tensor.to_vec1()?)),
        DType::F8E4M3 => Ok(convert_back_::<F8E4M3>(tensor.to_vec1()?)),
    }
}
/// Reads an entire safetensors file into memory and loads every tensor it
/// contains onto `device`, keyed by tensor name.
pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> {
    let data = std::fs::read(filename.as_ref())?;
    load_buffer(&data[..], device)
}
/// Deserializes an in-memory safetensors buffer and loads every tensor it
/// contains onto `device`, keyed by tensor name.
pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> {
    let st = safetensors::SafeTensors::deserialize(data)?;
    st.tensors()
        .into_iter()
        .map(|(name, view)| Ok((name, view.load(device)?)))
        .collect()
}
/// Serializes a map of named tensors to a safetensors file at `filename`,
/// overwriting any existing file.
pub fn save<K: AsRef<str> + Ord + std::fmt::Display, P: AsRef<Path>>(
    tensors: &HashMap<K, Tensor>,
    filename: P,
) -> Result<()> {
    Ok(st::serialize_to_file(tensors, &None, filename.as_ref())?)
}
// Newtype so `yoke` can tie the borrowed `SafeTensors` view to the backing
// storage (mmap or owned buffer) it borrows from.
#[derive(yoke::Yokeable)]
struct SafeTensors_<'a>(SafeTensors<'a>);
/// One or more memory-mapped safetensors files. `routing` maps a tensor name
/// to the index of the file that provides it (absent for the single-file case).
pub struct MmapedSafetensors {
    safetensors: Vec<yoke::Yoke<SafeTensors_<'static>, memmap2::Mmap>>,
    routing: Option<HashMap<String, usize>>,
}
impl MmapedSafetensors {
    /// Creates a wrapper around a memory mapped file and deserialize the safetensors header.
    ///
    /// # Safety
    ///
    /// The unsafe is inherited from [`memmap2::MmapOptions`].
    pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> {
        let p = p.as_ref();
        let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
        let file = memmap2::MmapOptions::new()
            .map(&file)
            .map_err(|e| Error::from(e).with_path(p))?;
        // `Yoke` keeps the mmap alive for as long as the parsed view that
        // borrows from it, making the pair safely self-referential.
        let safetensors = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart(
            file,
            |data: &[u8]| {
                let st = safetensors::SafeTensors::deserialize(data)
                    .map_err(|e| Error::from(e).with_path(p))?;
                Ok::<_, Error>(SafeTensors_(st))
            },
        )?;
        Ok(Self {
            safetensors: vec![safetensors],
            routing: None,
        })
    }
    /// Creates a wrapper around multiple memory mapped file and deserialize the safetensors headers.
    ///
    /// If a tensor name appears in multiple files, the last entry is returned.
    ///
    /// # Safety
    ///
    /// The unsafe is inherited from [`memmap2::MmapOptions`].
    pub unsafe fn multi<P: AsRef<Path>>(paths: &[P]) -> Result<Self> {
        let mut routing = HashMap::new();
        let mut safetensors = vec![];
        for (index, p) in paths.iter().enumerate() {
            let p = p.as_ref();
            let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
            let file = memmap2::MmapOptions::new()
                .map(&file)
                .map_err(|e| Error::from(e).with_path(p))?;
            let data = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart(
                file,
                |data: &[u8]| {
                    let st = safetensors::SafeTensors::deserialize(data)
                        .map_err(|e| Error::from(e).with_path(p))?;
                    Ok::<_, Error>(SafeTensors_(st))
                },
            )?;
            // Later files overwrite earlier entries, giving the documented
            // "last file wins" behavior for duplicated tensor names.
            for k in data.get().0.names() {
                routing.insert(k.to_string(), index);
            }
            safetensors.push(data)
        }
        Ok(Self {
            safetensors,
            routing: Some(routing),
        })
    }
    /// Loads the named tensor onto `dev`.
    pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
        self.get(name)?.load(dev)
    }
    /// Lists every (name, view) pair across all mapped files.
    pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
        let mut tensors = vec![];
        for safetensors in self.safetensors.iter() {
            tensors.push(safetensors.get().0.tensors())
        }
        tensors.into_iter().flatten().collect()
    }
    /// Looks up the raw view for `name`, consulting the routing table when
    /// multiple files are mapped; errors if the tensor is unknown.
    pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
        let index = match &self.routing {
            None => 0,
            Some(routing) => {
                let index = routing.get(name).ok_or_else(|| {
                    Error::CannotFindTensor {
                        path: name.to_string(),
                    }
                    .bt()
                })?;
                *index
            }
        };
        Ok(self.safetensors[index].get().0.tensor(name)?)
    }
}
/// Safetensors parsed from a borrowed in-memory byte slice; no copy is made
/// and the struct cannot outlive the buffer it borrows from.
pub struct SliceSafetensors<'a> {
    safetensors: SafeTensors<'a>,
}
impl<'a> SliceSafetensors<'a> {
    /// Creates a wrapper around a binary buffer and deserialize the safetensors header.
    pub fn new(buffer: &'a [u8]) -> Result<Self> {
        let safetensors = safetensors::SafeTensors::deserialize(buffer)?;
        Ok(Self { safetensors })
    }
    /// Loads the named tensor onto `dev`.
    pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
        self.safetensors.tensor(name)?.load(dev)
    }
    /// Lists every (name, view) pair in the buffer.
    pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
        self.safetensors.tensors()
    }
    /// Looks up the raw view for `name`; errors if the tensor is unknown.
    pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
        Ok(self.safetensors.tensor(name)?)
    }
}
/// Safetensors parsed from an owned byte buffer; `yoke` ties the borrowed
/// parse result to the `Vec<u8>` that backs it, so the pair is self-contained.
pub struct BufferedSafetensors {
    safetensors: yoke::Yoke<SafeTensors_<'static>, Vec<u8>>,
}
impl BufferedSafetensors {
    /// Creates a wrapper around a binary buffer and deserialize the safetensors header.
    pub fn new(buffer: Vec<u8>) -> Result<Self> {
        let safetensors = yoke::Yoke::<SafeTensors_<'static>, Vec<u8>>::try_attach_to_cart(
            buffer,
            |data: &[u8]| {
                let st = safetensors::SafeTensors::deserialize(data)?;
                Ok::<_, Error>(SafeTensors_(st))
            },
        )?;
        Ok(Self { safetensors })
    }
    /// Loads the named tensor onto `dev`.
    pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> {
        self.get(name)?.load(dev)
    }
    /// Lists every (name, view) pair in the buffer.
    pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> {
        self.safetensors.get().0.tensors()
    }
    /// Looks up the raw view for `name`; errors if the tensor is unknown.
    pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> {
        Ok(self.safetensors.get().0.tensor(name)?)
    }
}
/// A raw memory-mapped file plus its path (kept for error reporting); parsing
/// is deferred to [`MmapedFile::deserialize`].
pub struct MmapedFile {
    path: std::path::PathBuf,
    inner: memmap2::Mmap,
}
impl MmapedFile {
    /// Creates a wrapper around a memory mapped file from which you can retrieve
    /// tensors using [`MmapedFile::deserialize`]
    ///
    /// # Safety
    ///
    /// The unsafe is inherited from [`memmap2::MmapOptions`].
    pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> {
        let p = p.as_ref();
        let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?;
        let inner = memmap2::MmapOptions::new()
            .map(&file)
            .map_err(|e| Error::from(e).with_path(p))?;
        Ok(Self {
            inner,
            path: p.to_path_buf(),
        })
    }
    /// Parses the safetensors header of the mapped bytes; the returned view
    /// borrows from this struct and cannot outlive it.
    pub fn deserialize(&self) -> Result<SafeTensors<'_>> {
        let st = safetensors::SafeTensors::deserialize(&self.inner)
            .map_err(|e| Error::from(e).with_path(&self.path))?;
        Ok(st)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    // Round-trips a single tensor and checks the exact on-disk byte layout
    // (header + zeroed f32 payload).
    #[test]
    fn save_single_tensor() {
        let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap();
        t.save_safetensors("t", "t.safetensors").unwrap();
        let bytes = std::fs::read("t.safetensors").unwrap();
        assert_eq!(bytes, b"@\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
        std::fs::remove_file("t.safetensors").unwrap();
    }
    // Saves two tensors via `save`, loads them back via `load`, and checks
    // both the reloaded shapes and the serialized bytes.
    #[test]
    fn save_load_multiple_tensors() {
        let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap();
        let u = Tensor::zeros((1, 2), DType::F32, &Device::Cpu).unwrap();
        let map: HashMap<_, _> = [("t", t), ("u", u)].into_iter().collect();
        save(&map, "multi.safetensors").unwrap();
        let weights = load("multi.safetensors", &Device::Cpu).unwrap();
        assert_eq!(weights.get("t").unwrap().dims(), &[2, 2]);
        assert_eq!(weights.get("u").unwrap().dims(), &[1, 2]);
        let bytes = std::fs::read("multi.safetensors").unwrap();
        assert_eq!(bytes, b"x\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]},\"u\":{\"dtype\":\"F32\",\"shape\":[1,2],\"data_offsets\":[16,24]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
        std::fs::remove_file("multi.safetensors").unwrap();
    }
    // Checks that an I8 safetensors payload is widened to I64 on load (candle
    // has no native i8 dtype).
    #[test]
    fn load_i8() {
        let bytes = b"8\0\0\0\0\0\0\0{\"x\":{\"dtype\":\"I8\",\"shape\":[2],\"data_offsets\":[0,2]}} \x01\x03";
        std::fs::write("test_i8.safetensors", bytes).unwrap();
        let weights = load("test_i8.safetensors", &Device::Cpu).unwrap();
        let tensor = weights.get("x").unwrap();
        assert_eq!(tensor.dims(), &[2]);
        assert_eq!(tensor.dtype(), DType::I64);
        let data: Vec<i64> = tensor.to_vec1().unwrap();
        assert_eq!(data, vec![1, 3]);
        std::fs::remove_file("test_i8.safetensors").unwrap();
    }
}
| candle/candle-core/src/safetensors.rs/0 | {
"file_path": "candle/candle-core/src/safetensors.rs",
"repo_id": "candle",
"token_count": 8785
} | 30 |
#![allow(clippy::approx_constant)]
use anyhow::{Context, Result};
use candle_core::{test_device, test_utils, DType, Device, Shape, Tensor, Var};
/// Checks backprop through a scalar polynomial: y = x^2 + 5x + 4 over a
/// 3-element vector, verifying both the forward values and dy/dx = 2x + 5.
fn simple_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4.], device)?;
    let x = x.as_tensor();
    let y = (((x * x)? + x * 5f64)? + 4f64)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(x.to_vec1::<f32>()?, [3., 1., 4.]);
    // y = x^2 + 5.x + 4
    assert_eq!(y.to_vec1::<f32>()?, [28., 10., 40.]);
    // dy/dx = 2.x + 5
    assert_eq!(grad_x.to_vec1::<f32>()?, [11., 7., 13.]);
    Ok(())
}
/// Checks backprop through a sum reduction (y = 2 * sum(x^2)), both with the
/// kept dimension and with the result squeezed to a scalar.
fn sum_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4.], device)?;
    let x = x.as_tensor();
    let y = (x.sqr()?.sum_keepdim(0)? * 2.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [52.]);
    // y = 2.x^2 so dy/dx = 4.x
    assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);
    // Same test as before but squeezing on the last dimension.
    let y = (x.sqr()?.sum_keepdim(0)? * 2.)?.squeeze(0)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_scalar::<f32>()?, 52.);
    // y = 2.x^2 so dy/dx = 4.x
    assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);
    Ok(())
}
/// Checks backprop through a batched matmul (2x2x3 @ 2x3x2): the gradients of
/// both operands must have the operands' shapes and the expected values.
fn matmul_grad(device: &Device) -> Result<()> {
    let data: Vec<_> = (0..12).map(|i| i as f32).collect();
    let x = Var::from_slice(&data, (2, 2, 3), device)?;
    let data: Vec<_> = (0..12).map(|i| i as f32).collect();
    let y = Var::from_slice(&data, (2, 3, 2), device)?;
    let c = x.matmul(&y)?;
    let grads = c.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    let grad_y = grads.get(&y).context("no grad for y")?;
    assert_eq!(grad_x.shape(), &Shape::from((2, 2, 3)));
    assert_eq!(grad_y.shape(), &Shape::from((2, 3, 2)));
    assert_eq!(
        &*grad_x.to_vec3::<f32>()?,
        &[
            [[1., 5., 9.], [1., 5., 9.]],
            [[13., 17., 21.], [13., 17., 21.]]
        ]
    );
    assert_eq!(
        &*grad_y.to_vec3::<f32>()?,
        &[
            [[3., 3.], [5., 5.], [7., 7.]],
            [[15., 15.], [17., 17.], [19., 19.]]
        ]
    );
    Ok(())
}
// The simplest gradient descent, using scalar variable.
/// Minimizes (x - 4.2)^2 with 100 steps of plain gradient descent starting at
/// x = 0 and checks that x converges to (approximately) 4.2.
fn grad_descent(device: &Device) -> Result<()> {
    let x = Var::new(0f32, device)?;
    let learning_rate = 0.1;
    for _step in 0..100 {
        let xt = x.as_tensor();
        let c = ((xt - 4.2)? * (xt - 4.2)?)?;
        let grads = c.backward()?;
        let x_grad = grads.get(&x).context("no grad for x")?;
        x.set(&(xt - x_grad * learning_rate)?)?
    }
    // Exact f32 result after 100 steps; slightly below 4.2 due to rounding.
    assert_eq!(x.to_scalar::<f32>()?, 4.199999);
    Ok(())
}
/// Checks backprop through a battery of unary ops (log, exp, sin, cos, sqr,
/// sqrt, neg, affine, div, powf, tanh, gelu, erf, gelu_erf, elu, silu) and
/// through 1d/2d nearest-neighbor upsampling, against golden values — most of
/// them generated with PyTorch (see the inline scripts).
fn unary_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
    let x = x.as_tensor();
    let y = (x.log()? + 1.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.0986, 1.0, 2.3863, -0.8971]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.3333, 1.0, 0.25, 6.6667]
    );
    let y = x.exp()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [20.0855, 2.7183, 54.5982, 1.1618]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [20.0855, 2.7183, 54.5982, 1.1618]
    );
    let y = x.exp()?.sqr()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 3)?,
        [403.429, 7.389, 2980.958, 1.35]
    );
    // exp(x)^2 = exp(2*x)
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 2)?,
        [806.86, 14.78, 5961.92, 2.7]
    );
    let y = x.sin()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [0.1411, 0.8415, -0.7568, 0.1494],
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [-0.99, 0.5403, -0.6536, 0.9888],
    );
    let y = x.cos()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [-0.99, 0.5403, -0.6536, 0.9888],
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [-0.1411, -0.8415, 0.7568, -0.1494],
    );
    let y = x.sqr()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [9.0, 1.0, 16.0, 0.0225]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, 8.0, 0.3]);
    let y = x.sqr()?.sqrt()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [3.0, 1.0, 4.0, 0.15]);
    assert_eq!(test_utils::to_vec1_round(grad_x, 4)?, [1.0, 1.0, 1.0, 1.0]);
    let y = x.neg()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [-3.0, -1.0, -4.0, -0.15]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [-1.0, -1.0, -1.0, -1.0]);
    let y = x.affine(0.2, 1.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [1.6, 1.2, 1.8, 1.03]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [0.2, 0.2, 0.2, 0.2]);
    let y = Tensor::new(1f32, device)?.broadcast_div(x)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [0.3333, 1.0, 0.25, 6.6667]
    );
    assert_eq!(
        grad_x.to_vec1::<f32>()?,
        [-0.11111111, -1.0, -0.0625, -44.444443],
    );
    let y = x.broadcast_div(&Tensor::new(0.5f32, device)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [6., 2., 8., 0.3]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [2., 2., 2., 2.]);
    let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
    let y = x.powf(2.5)?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 2)?, [15.59, 1.0, 32.0, 0.01]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 2)?,
        [12.99, 2.5, 20.0, 0.15]
    );
    let y = x.tanh()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 2)?, [1.0, 0.76, 1.0, 0.15]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 2)?,
        [0.01, 0.42, 0.0, 0.98],
    );
    // testing compared to pytorch nn.GELU(approximate = 'tanh')
    let y = x.gelu()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.9964, 0.8412, 3.9999, 0.0839]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [1.0116, 1.0830, 1.0003, 0.6188],
    );
    // Testing compared to pytorch torch.erf
    //
    // import torch
    // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
    // y = x.erf()
    // print(y)
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let y = x.erf()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 4)?, [1.0, 0.8427, 1.0, 0.168]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.0001, 0.4151, 0.0, 1.1033],
    );
    // Testing compared to pytorch nn.GELU(approximate = 'none')
    //
    // import torch
    // import torch.nn.functional as F
    // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
    // y = F.gelu(x, approximate='none')
    // print(y)
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let y = x.gelu_erf()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.9960, 0.8413, 3.9999, 0.0839]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [1.0119, 1.0833, 1.0005, 0.6188],
    );
    // Testing compared to pytorch elu
    //
    // import torch
    // import torch.nn.functional as F
    // x = torch.tensor([-1.0, 0.0, -2.0, 3.0], requires_grad=True)
    // y = F.elu(x, alpha=2.0)
    // print(y)
    // loss = y.min
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let elu_x = Var::new(&[-1.0f32, 0., -2., 3.], device)?;
    let y = elu_x.elu(2.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(&elu_x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [-1.2642, 0.0000, -1.7293, 3.0000]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.7358, 2.0000, 0.2707, 1.0000]
    );
    // testing compared to pytorch nn.Silu()
    let y = x.silu()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.8577, 0.7311, 3.9281, 0.0806]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [1.0881, 0.9277, 1.0527, 0.5747],
    );
    if device.is_cpu() {
        let x = Var::new(&[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]], device)?;
        let y = x.interpolate1d(12)?.reshape(36)?;
        let z = Tensor::new(
            &[
                1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16.,
                17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32.,
                33., 34., 35., 36.,
            ],
            device,
        )?;
        let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
        let grads = loss.backward()?;
        let grad_x = grads.get(&x).context("no grad for x")?;
        assert_eq!(
            test_utils::to_vec3_round(grad_x, 4)?,
            [[[10_f32, 26., 42.], [58., 74., 90.], [106., 122., 138.]]]
        );
    }
    // manually checked: see comments
    let x = Var::new(&[[[[1f32, 2., 3.], [4., 5., 6.], [7., 8., 9.]]]], device)?;
    let y = x.interpolate2d(6, 6)?.reshape(36)?;
    let z = Tensor::new(
        &[
            1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17.,
            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.,
            35., 36.,
        ],
        device,
    )?;
    // gradient should be
    // row 1
    // 1+2+7+8 = 18
    // 3+4+9+10 = 26
    // 5+6+11+12 = 34
    // row 2
    // 13+14+19+20 = 66
    // 15+16+21+22 = 74
    // 17+18+23+24 = 82
    // row 3
    // 25+26+31+32 = 114
    // 27+28+33+34 = 122
    // 29+30+35+36 = 130
    let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
    let grads = loss.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?,
        [[18_f32, 26., 34.], [66., 74., 82.], [114., 122., 130.]]
    );
    // manually checked: see comments
    let x = Var::new(&[[[[1f32, 2.], [4., 5.]]]], device)?;
    let y = x.interpolate2d(6, 6)?.reshape(36)?;
    let z = Tensor::new(
        &[
            1_f32, 02., 03., 04., 05., 06., 07., 08., 09., 10., 11., 12., 13., 14., 15., 16., 17.,
            18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34.,
            35., 36.,
        ],
        device,
    )?;
    // gradient should be
    // row 1
    // 1+2+3+7+8+9+13+14+15 = 72
    // 4+5+6+10+11+12+16+17+18 = 99
    // row 2
    // 19+20+21+25+26+27+31+32+33 = 234
    // 22+23+24+28+29+30+34+35+36 = 243
    let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
    let grads = loss.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec2_round(&grad_x.flatten(0, 2)?, 4)?,
        [[72_f32, 99.], [234., 261.]]
    );
    // manually checked: see comments
    let x = Var::new(&[[[[1f32, 2.], [4., 5.]], [[6f32, 7.], [8., 9.]]]], device)?;
    let y = x.interpolate2d(4, 4)?.reshape(32)?;
    #[rustfmt::skip]
    let z = Tensor::new(
        &[
            1_f32, 02., 03., 04.,
            05., 06., 07., 08.,
            09., 10., 11., 12.,
            13., 14., 15., 16.,
            17., 18., 19., 20.,
            21., 22., 23., 24.,
            25., 26., 27., 28.,
            29., 30., 31., 32.
        ],
        device,
    )?;
    // gradient should be
    // m1r1
    // 1+2+5+6=14
    // 3+4+7+8=22
    // m1r2
    // 9+10+13+14=46
    // 11+12+15+16=54
    // m2r1
    // 17+18+21+22=78
    // 19+20+23+24=86
    // m2r2
    // 25+26+29+30=110
    // 27+28+31+32=118
    let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
    let grads = loss.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?,
        [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]]
    );
    // manually checked: see comments
    let x = Var::new(
        &[[[[1f32, 2.], [4., 5.]]], [[[6f32, 7.], [8., 9.]]]],
        device,
    )?;
    let y = x.interpolate2d(4, 4)?.reshape(32)?;
    #[rustfmt::skip]
    let z = Tensor::new(
        &[
            1_f32, 02., 03., 04.,
            05., 06., 07., 08.,
            09., 10., 11., 12.,
            13., 14., 15., 16.,
            17., 18., 19., 20.,
            21., 22., 23., 24.,
            25., 26., 27., 28.,
            29., 30., 31., 32.
        ],
        device,
    )?;
    // gradient should be
    // m1r1
    // 1+2+5+6=14
    // 3+4+7+8=22
    // m1r2
    // 9+10+13+14=46
    // 11+12+15+16=54
    // m2r1
    // 17+18+21+22=78
    // 19+20+23+24=86
    // m2r2
    // 25+26+29+30=110
    // 27+28+31+32=118
    let loss = y.unsqueeze(1)?.transpose(0, 1)?.matmul(&z.unsqueeze(1)?)?;
    let grads = loss.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec3_round(&grad_x.flatten(0, 1)?, 4)?,
        [[[14_f32, 22.], [46., 54.]], [[78., 86.], [110., 118.]]]
    );
    Ok(())
}
/// Checks gradients of binary element-wise ops on `device`: `maximum` and
/// `minimum` (including the `minimum(x, x)` identity case) as well as
/// `slice_scatter0` followed by `sqr`.
fn binary_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., -4., -1.], device)?;
    let x = x.as_tensor();
    // leaky relu: y = max(x, 0.1 * x)
    let y = x.maximum(&(x * 0.1)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(x.to_vec1::<f32>()?, [3., 1., -4., -1.]);
    assert_eq!(y.to_vec1::<f32>()?, [3., 1., -0.4, -0.1]);
    // d/dx max(x, 0.1x) is 1 where x > 0 and 0.1 where x < 0.
    assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 0.1, 0.1]);
    let y = x.minimum(&(x * 0.1)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [0.3, 0.1, -4., -1.]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [0.1, 0.1, 1., 1.]);
    // This one is easy to mess up, we want the gradient to be one as it is the identity function.
    let y = x.minimum(x)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [3., 1., -4., -1.]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 1., 1.]);
    // slice_scatter0 overwrites row 1 of the reshaped x with y; after sqr the
    // gradient is 2 * value for surviving elements and 0 for overwritten ones.
    let x_var = Var::new(&[3f32, 1., -4., -1., 5., 9.], device)?;
    let x = x_var.as_tensor();
    let y_var = Var::new(&[2f32, 7., 1.], device)?;
    let y = y_var.as_tensor();
    let ss = x
        .reshape((2, 3))?
        .slice_scatter0(&y.reshape((1, 3))?, 1)?
        .sqr()?;
    let grads = ss.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    let grad_y = grads.get(y).context("no grad for y")?;
    assert_eq!(ss.to_vec2::<f32>()?, [[9., 1., 16.], [4., 49., 1.]]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, -8.0, 0.0, 0.0, 0.0]);
    assert_eq!(grad_y.to_vec1::<f32>()?, [4.0, 14.0, 2.0]);
    Ok(())
}
/// Checks that gradients propagate correctly through `flip`: for
/// `z = flip(x @ w)` the gradient wrt x must use the column-flipped weights.
#[test]
fn test_flip_backprop() -> Result<()> {
    let device = &Device::Cpu;
    // Create a tensor (leaf node) that requires gradients
    let x = Var::ones((2, 2), DType::F64, device)?;
    let weights = Tensor::arange(1.0, 5.0, device)?.reshape((2, 2))?;
    let y = x.matmul(&weights)?;
    let expected_y = Tensor::from_vec(vec![4.0, 6.0, 4.0, 6.0], (2, 2), device)?;
    candle_core::test_utils::assert_tensor_eq(&y, &expected_y)?;
    // Flip the columns of y.
    let z = y.flip(&[1])?;
    let expected_z = Tensor::from_vec(vec![6.0, 4.0, 6.0, 4.0], (2, 2), device)?;
    candle_core::test_utils::assert_tensor_eq(&z, &expected_z)?;
    let loss = z.sum_all()?;
    let grad_store = loss.backward()?;
    let grad_x = grad_store.get_id(x.id()).unwrap();
    let flipped_weights = weights.flip(&[1])?;
    let dloss_dy = Tensor::ones((2, 2), DType::F64, device)?;
    // dloss/dx = dloss/dy @ dy/dx = ones @ weight.flip.T
    let expected_grad = dloss_dy.matmul(&flipped_weights.t()?)?;
    candle_core::test_utils::assert_tensor_eq(grad_x, &expected_grad)?;
    Ok(())
}
// Instantiate each gradient test for the cpu, cuda and metal backends.
test_device!(
    simple_grad,
    simple_grad_cpu,
    simple_grad_gpu,
    simple_grad_metal
);
test_device!(sum_grad, sum_grad_cpu, sum_grad_gpu, sum_grad_metal);
test_device!(
    matmul_grad,
    matmul_grad_cpu,
    matmul_grad_gpu,
    matmul_grad_metal
);
test_device!(
    grad_descent,
    grad_descent_cpu,
    grad_descent_gpu,
    grad_descent_metal
);
test_device!(unary_grad, unary_grad_cpu, unary_grad_gpu, unary_grad_metal);
test_device!(
    binary_grad,
    binary_grad_cpu,
    binary_grad_gpu,
    binary_grad_metal
);
| candle/candle-core/tests/grad_tests.rs/0 | {
"file_path": "candle/candle-core/tests/grad_tests.rs",
"repo_id": "candle",
"token_count": 9586
} | 31 |
# candle-datasets
| candle/candle-datasets/README.md/0 | {
"file_path": "candle/candle-datasets/README.md",
"repo_id": "candle",
"token_count": 7
} | 32 |
//! BEiT: BERT Pre-Training of Image Transformers
//! https://github.com/microsoft/unilm/tree/master/beit
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::Parser;
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::beit;
/// Loads an image from disk using the image crate, this returns a tensor with shape
/// (3, 384, 384). Beit special normalization is applied.
pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?;
let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
// Command line arguments for the BEiT image classification example.
// (Plain `//` comments are used on purpose: clap turns `///` into help text.)
#[derive(Parser)]
struct Args {
    // Optional local safetensors checkpoint; fetched from the Hub when absent.
    #[arg(long)]
    model: Option<String>,
    // Path of the image to classify.
    #[arg(long)]
    image: String,
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
}
/// Entry point: loads the BEiT checkpoint, classifies the given image and
/// prints the top-5 ImageNet classes with their probabilities.
pub fn main() -> anyhow::Result<()> {
    let args = Args::parse();
    let device = candle_examples::device(args.cpu)?;
    let image = load_image384_beit_norm(args.image)?.to_device(&device)?;
    println!("loaded image {image:?}");
    // Use the local checkpoint when given, otherwise download from the Hub.
    let model_file = match args.model {
        None => {
            let api = hf_hub::api::sync::Api::new()?;
            let api = api.model("vincent-espitalier/candle-beit".into());
            api.get("beit_base_patch16_384.in22k_ft_in22k_in1k.safetensors")?
        }
        Some(model) => model.into(),
    };
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
    let model = beit::vit_base(vb)?;
    println!("model built");
    let logits = model.forward(&image.unsqueeze(0)?)?;
    // Softmax over the class dimension, then sort descending by probability.
    let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
        .i(0)?
        .to_vec1::<f32>()?;
    let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
    prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
    for &(category_idx, pr) in prs.iter().take(5) {
        println!(
            "{:24}: {:.2}%",
            candle_examples::imagenet::CLASSES[category_idx],
            100. * pr
        );
    }
    Ok(())
}
| candle/candle-examples/examples/beit/main.rs/0 | {
"file_path": "candle/candle-examples/examples/beit/main.rs",
"repo_id": "candle",
"token_count": 1178
} | 33 |
use anyhow::{Error as E, Result};
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::colpali::Model;
use candle_transformers::models::{colpali, paligemma};
use clap::Parser;
use hf_hub::{api::sync::Api, Repo, RepoType};
use image::DynamicImage;
use pdf2image::{RenderOptionsBuilder, PDF};
use tokenizers::Tokenizer;
/// Retrieves the PDF pages most relevant to a text prompt using ColPali
/// late-interaction scoring.
struct PageRetriever {
    model: Model,
    config: paligemma::Config,
    pdf: PDF,
    device: Device,
    tokenizer: Tokenizer,
    // Page range of the PDF to score.
    range: pdf2image::Pages,
    // Number of pages embedded per forward pass.
    batch_size: usize,
    // Number of best-scoring pages to return.
    top_k: usize,
}
impl PageRetriever {
    /// Builds a retriever; when `range` is `None` every page of the PDF is
    /// scored.
    fn new(
        model: Model,
        config: paligemma::Config,
        pdf: PDF,
        tokenizer: Tokenizer,
        device: &Device,
        range: Option<pdf2image::Pages>,
        batch_size: usize,
        top_k: usize,
    ) -> Self {
        let page_count = pdf.page_count();
        Self {
            model,
            config,
            pdf,
            device: device.clone(),
            tokenizer,
            range: range.unwrap_or_else(|| pdf2image::Pages::Range(1..=page_count)),
            batch_size,
            top_k,
        }
    }
    /// Renders the selected PDF pages to images.
    fn get_images_from_pdf(&self) -> Result<Vec<DynamicImage>> {
        let pages = self
            .pdf
            .render(self.range.clone(), RenderOptionsBuilder::default().build()?)?;
        Ok(pages)
    }
    /// Encodes `prompts` and stacks the token ids into a (batch, seq) tensor.
    fn tokenize_batch(&self, prompts: Vec<&str>) -> Result<Tensor> {
        let tokens = self.tokenizer.encode_batch(prompts, true).map_err(E::msg)?;
        let token_ids = tokens
            .iter()
            .map(|tokens| {
                let tokens = tokens.get_ids().to_vec();
                Tensor::new(tokens.as_slice(), &self.device)
            })
            .collect::<candle::Result<Vec<_>>>()?;
        let input = Tensor::stack(&token_ids, 0)?;
        Ok(input)
    }
    /// Resizes `pages` to `image_size` x `image_size` and packs them into a
    /// (batch, 3, h, w) f32 tensor scaled to [-1, 1].
    fn images_to_tensor(
        &self,
        pages: &[DynamicImage],
        image_size: usize,
    ) -> anyhow::Result<Tensor> {
        let mut images = vec![];
        for page in pages.iter() {
            let img = page.resize_to_fill(
                image_size as u32,
                image_size as u32,
                image::imageops::FilterType::Triangle,
            );
            let img = img.to_rgb8();
            let img = img.into_raw();
            // HWC u8 -> CHW f32 mapped from [0, 255] to [-1, 1].
            let img = Tensor::from_vec(img, (image_size, image_size, 3), &Device::Cpu)?
                .permute((2, 0, 1))?
                .to_dtype(DType::F32)?
                .affine(2. / 255., -1.)?;
            images.push(img);
        }
        let images = Tensor::stack(&images, 0)?;
        Ok(images)
    }
    /// Scores every page against `prompt` with a late-interaction (MaxSim
    /// style) similarity and returns the indices of the `top_k` best pages.
    fn retrieve(&mut self, prompt: &str) -> Result<Vec<usize>> {
        let dtype = if self.device.is_cuda() {
            DType::BF16
        } else {
            DType::F32
        };
        let dummy_prompt: &str = "Describe the image";
        let input = self.tokenize_batch(vec![prompt])?;
        let dummy_input = self.tokenize_batch(vec![dummy_prompt])?;
        let pages = self.get_images_from_pdf()?;
        let mut all_scores = Vec::new();
        for batch in pages.chunks(self.batch_size) {
            let page_images = self
                .images_to_tensor(batch, self.config.vision_config.image_size)?
                .to_device(&self.device)?
                .to_dtype(dtype)?;
            // NOTE(review): a repeat count of 0 on the second axis looks
            // suspicious (torch-style repeat semantics would yield an empty
            // tensor) -- confirm candle's `repeat` behavior here.
            let dummy_input = dummy_input.repeat((page_images.dims()[0], 0))?;
            let image_embeddings = self.model.forward_images(&page_images, &dummy_input)?;
            let text_embeddings = self.model.forward_text(&input)?;
            // MaxSim: max over image tokens, then sum over query tokens.
            let scores = text_embeddings
                .unsqueeze(1)?
                .broadcast_matmul(&image_embeddings.unsqueeze(0)?.transpose(3, 2)?)?
                .max(3)?
                .sum(2)?;
            let batch_scores: Vec<f32> = scores
                .to_dtype(DType::F32)?
                .to_vec2()?
                .into_iter()
                .flatten()
                .collect();
            all_scores.extend(batch_scores);
        }
        // Sort page indices by descending score and keep the top_k best.
        let mut indices: Vec<usize> = (0..all_scores.len()).collect();
        indices.sort_by(|a, b| all_scores[*b].partial_cmp(&all_scores[*a]).unwrap());
        let top_k_indices = indices[0..self.top_k].to_vec();
        Ok(top_k_indices)
    }
}
// Command line arguments for the ColPali PDF page retrieval example.
// (Plain `//` comments are used on purpose: clap turns `///` into help text.)
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,
    // Text query to score the PDF pages against.
    #[arg(long)]
    prompt: String,
    /// number of top pages to show.
    #[arg(long, default_value_t = 3)]
    top_k: usize,
    #[arg(long)]
    model_id: Option<String>,
    #[arg(long, default_value = "main")]
    revision: String,
    #[arg(long)]
    tokenizer_file: Option<String>,
    // Comma separated list of safetensors files.
    #[arg(long)]
    weight_files: Option<String>,
    // Path of the PDF to search.
    #[arg(long)]
    pdf: String,
    // Optional first/last page (1-based) restricting the search range.
    #[arg(long)]
    start: Option<u32>,
    #[arg(long)]
    end: Option<u32>,
}
/// Entry point: loads the ColPali checkpoint and tokenizer, renders the PDF
/// and prints the page numbers most similar to the prompt.
fn main() -> Result<()> {
    use tracing_chrome::ChromeLayerBuilder;
    use tracing_subscriber::prelude::*;
    let args = Args::parse();
    let _guard = if args.tracing {
        let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };
    println!(
        "avx: {}, neon: {}, simd128: {}, f16c: {}",
        candle::utils::with_avx(),
        candle::utils::with_neon(),
        candle::utils::with_simd128(),
        candle::utils::with_f16c()
    );
    let api = Api::new()?;
    let model_id = match &args.model_id {
        Some(model_id) => model_id.to_string(),
        None => "vidore/colpali-v1.2-merged".to_string(),
    };
    let repo = api.repo(Repo::with_revision(
        model_id,
        RepoType::Model,
        args.revision,
    ));
    // The tokenizer lives in the base `vidore/colpali` repo, not in the
    // merged checkpoint repo.
    let tokenizer_filename = match args.tokenizer_file {
        Some(file) => std::path::PathBuf::from(file),
        None => api
            .repo(Repo::with_revision(
                "vidore/colpali".to_string(),
                RepoType::Model,
                "main".to_string(),
            ))
            .get("tokenizer.json")?,
    };
    let filenames = match args.weight_files {
        Some(files) => files
            .split(',')
            .map(std::path::PathBuf::from)
            .collect::<Vec<_>>(),
        None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?,
    };
    let start = std::time::Instant::now();
    let config: paligemma::Config = paligemma::Config::paligemma_3b_448();
    println!("retrieved the files in {:?}", start.elapsed());
    let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
    // Fix: honor the --cpu flag (this previously always passed `false`).
    let device = candle_examples::device(args.cpu)?;
    let dtype = if device.is_cuda() {
        DType::BF16
    } else {
        DType::F32
    };
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
    let model = colpali::Model::new(&config, vb)?;
    let pdf = PDF::from_file(args.pdf)?;
    // check if start and end given in arg
    let range = if let (Some(start), Some(end)) = (args.start, args.end) {
        pdf2image::Pages::Range(start..=end)
    } else {
        pdf2image::Pages::Range(1..=pdf.page_count()) // can use pdf2image::Pages::All but there is a bug in the library which causes the first page to rendered twice.
    };
    // Fix: honor the --top-k flag (this previously hardcoded 3, making the
    // CLI option and its default value dead).
    let mut retriever = PageRetriever::new(
        model,
        config,
        pdf,
        tokenizer,
        &device,
        Some(range),
        4,
        args.top_k,
    );
    let top_k_indices = retriever.retrieve(&args.prompt)?;
    println!("Prompt: {}", args.prompt);
    println!(
        "top {} page numbers that contain similarity to the prompt",
        retriever.top_k
    );
    println!("-----------------------------------");
    for index in top_k_indices {
        println!("Page: {:?}", index + 1);
    }
    println!("-----------------------------------");
    Ok(())
}
| candle/candle-examples/examples/colpali/main.rs/0 | {
"file_path": "candle/candle-examples/examples/colpali/main.rs",
"repo_id": "candle",
"token_count": 3864
} | 34 |
# candle-granite LLMs from IBM Research
[Granite](https://www.ibm.com/granite) is a family of Large Language Models built for business, to help drive trust and scalability in AI-driven applications.
## Running the example
```bash
$ cargo run --example granite --features metal -r -- --model-type "granite7b-instruct" \
--prompt "Explain how quantum computing differs from classical computing, focusing on key concepts like qubits, superposition, and entanglement. Describe two potential breakthroughs in the fields of drug discovery and cryptography. Offer a convincing argument for why businesses and governments should invest in quantum computing research now, emphasizing its future benefits and the risks of falling behind"
Explain how quantum computing differs from classical computing, focusing on key concepts like qubits, superposition, and entanglement. Describe two potential breakthroughs in the fields of drug discovery and cryptography. Offer a convincing argument for why businesses and governments should invest in quantum computing research now, emphasizing its future benefits and the risks of falling behind competitors.
In recent years, there has been significant interest in quantum computing due to its potential to revolutionize various fields, including drug discovery, cryptography, and optimization problems. Quantum computers, which leverage the principles of quantum mechanics, differ fundamentally from classical computers. Here are some of the key differences:
```
## Supported Models
There are two different modalities for the Granite family models: Language and Code.
### Granite for language
1. [Granite 7b Instruct](https://huggingface.co/ibm-granite/granite-7b-instruct)
| candle/candle-examples/examples/granite/README.md/0 | {
"file_path": "candle/candle-examples/examples/granite/README.md",
"repo_id": "candle",
"token_count": 371
} | 35 |
// Special tokens used by the LLaVA prompt format.
/// Token substituted with the image embeddings in the prompt.
pub const DEFAULT_IMAGE_TOKEN: &str = "<image>";
/// Marks the start of an image segment.
pub const DEFAULT_IM_START_TOKEN: &str = "<im_start>";
/// Marks the end of an image segment.
pub const DEFAULT_IM_END_TOKEN: &str = "<im_end>";
/// User-facing placeholder that gets rewritten to the image token.
pub const IMAGE_PLACEHOLDER: &str = "<image-placeholder>";
| candle/candle-examples/examples/llava/constants.rs/0 | {
"file_path": "candle/candle-examples/examples/llava/constants.rs",
"repo_id": "candle",
"token_count": 86
} | 36 |
# candle-mimi
[Mimi](https://huggingface.co/kyutai/mimi) is a state-of-the-art audio
compression model using an encoder/decoder architecture with residual vector
quantization. The candle implementation supports streaming, meaning that it's
possible to encode or decode a stream of audio tokens on the fly to provide
low-latency interaction with an audio model.
## Running one example
Generate audio tokens from an audio file.
```bash
wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3
cargo run --example mimi --features mimi --release -- audio-to-code bria.mp3 bria.safetensors
```
And decoding the audio tokens back into a sound file.
```bash
cargo run --example mimi --features mimi --release -- code-to-audio bria.safetensors bria.wav
```
| candle/candle-examples/examples/mimi/README.md/0 | {
"file_path": "candle/candle-examples/examples/mimi/README.md",
"repo_id": "candle",
"token_count": 228
} | 37 |
use std::path::PathBuf;
use anyhow::{Error as E, Result};
use candle::{Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::modernbert;
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::{PaddingParams, Tokenizer};
// Selects which pretrained ModernBERT checkpoint to load from the Hub.
// (Plain `//` comments on purpose: clap turns `///` into help text.)
#[derive(Debug, Clone, ValueEnum)]
enum Model {
    ModernBertBase,
    ModernBertLarge,
}
// Command line arguments for the ModernBERT masked-language-modelling example.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,
    // Hub model id; defaults to the repo matching `--model` when unset.
    #[arg(long)]
    model_id: Option<String>,
    #[arg(long, default_value = "main")]
    revision: String,
    // Checkpoint size used when `--model-id` is not given.
    #[arg(long, default_value = "modern-bert-base")]
    model: Model,
    // Path to the tokenizer file.
    #[arg(long)]
    tokenizer_file: Option<String>,
    // Path to the weight files.
    #[arg(long)]
    weight_files: Option<String>,
    // Path to the config file.
    #[arg(long)]
    config_file: Option<String>,
    /// When set, compute embeddings for this prompt.
    #[arg(long)]
    prompt: Option<String>,
}
fn main() -> Result<()> {
let args = Args::parse();
let api = Api::new()?;
let model_id = match &args.model_id {
Some(model_id) => model_id.to_string(),
None => match args.model {
Model::ModernBertBase => "answerdotai/ModernBERT-base".to_string(),
Model::ModernBertLarge => "answerdotai/ModernBERT-large".to_string(),
},
};
let repo = api.repo(Repo::with_revision(
model_id,
RepoType::Model,
args.revision,
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let config_filename = match args.config_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("config.json")?,
};
let weights_filename = match args.weight_files {
Some(files) => PathBuf::from(files),
None => match repo.get("model.safetensors") {
Ok(safetensors) => safetensors,
Err(_) => match repo.get("pytorch_model.bin") {
Ok(pytorch_model) => pytorch_model,
Err(e) => {
anyhow::bail!("Model weights not found. The weights should either be a `model.safetensors` or `pytorch_model.bin` file. Error: {e}")
}
},
},
};
let config = std::fs::read_to_string(config_filename)?;
let config: modernbert::Config = serde_json::from_str(&config)?;
let mut tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let device = candle_examples::device(args.cpu)?;
let vb = if weights_filename.ends_with("model.safetensors") {
unsafe {
VarBuilder::from_mmaped_safetensors(&[weights_filename], candle::DType::F32, &device)
.unwrap()
}
} else {
println!("Loading weights from pytorch_model.bin");
VarBuilder::from_pth(&weights_filename, candle::DType::F32, &device).unwrap()
};
tokenizer
.with_padding(Some(PaddingParams {
strategy: tokenizers::PaddingStrategy::BatchLongest,
pad_id: config.pad_token_id,
..Default::default()
}))
.with_truncation(None)
.map_err(E::msg)?;
let prompt = match &args.prompt {
Some(p) => vec![p.as_str()],
None => vec![
"Hello I'm a [MASK] model.",
"I'm a [MASK] boy.",
"I'm [MASK] in berlin.",
"The capital of France is [MASK].",
],
};
let model = modernbert::ModernBertForMaskedLM::load(vb, &config)?;
let input_ids = tokenize_batch(&tokenizer, prompt.clone(), &device)?;
let attention_mask = get_attention_mask(&tokenizer, prompt.clone(), &device)?;
let output = model
.forward(&input_ids, &attention_mask)?
.to_dtype(candle::DType::F32)?;
let max_outs = output.argmax(2)?;
let max_out = max_outs.to_vec2::<u32>()?;
let max_out_refs: Vec<&[u32]> = max_out.iter().map(|v| v.as_slice()).collect();
let decoded = tokenizer.decode_batch(&max_out_refs, true).unwrap();
for (i, sentence) in decoded.iter().enumerate() {
println!("Sentence: {} : {}", i + 1, sentence);
}
Ok(())
}
/// Batch-encodes `input` with `tokenizer` and stacks the token ids of each
/// encoding into a single (batch, seq_len) u32 tensor on `device`.
pub fn tokenize_batch(
    tokenizer: &Tokenizer,
    input: Vec<&str>,
    device: &Device,
) -> anyhow::Result<Tensor> {
    let encodings = tokenizer.encode_batch(input, true).map_err(E::msg)?;
    let mut rows = Vec::with_capacity(encodings.len());
    for encoding in encodings.iter() {
        let ids = encoding.get_ids().to_vec();
        rows.push(Tensor::new(ids.as_slice(), device)?);
    }
    Ok(Tensor::stack(&rows, 0)?)
}
/// Batch-encodes `input` with `tokenizer` and stacks each encoding's
/// attention mask into a single (batch, seq_len) u32 tensor on `device`.
pub fn get_attention_mask(
    tokenizer: &Tokenizer,
    input: Vec<&str>,
    device: &Device,
) -> anyhow::Result<Tensor> {
    let encodings = tokenizer.encode_batch(input, true).map_err(E::msg)?;
    let mut masks = Vec::with_capacity(encodings.len());
    for encoding in encodings.iter() {
        let mask = encoding.get_attention_mask().to_vec();
        masks.push(Tensor::new(mask.as_slice(), device)?);
    }
    Ok(Tensor::stack(&masks, 0)?)
}
| candle/candle-examples/examples/modernbert/main.rs/0 | {
"file_path": "candle/candle-examples/examples/modernbert/main.rs",
"repo_id": "candle",
"token_count": 2425
} | 38 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle::{DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::llama::{Cache, Llama, LlamaConfig};
use candle_transformers::models::snac::{Config as SnacConfig, Model as SnacModel};
use tokenizers::Tokenizer;
// https://github.com/canopyai/Orpheus-TTS/blob/df0b0d96685dd21885aef7f900ee7f705c669e94/realtime_streaming_example/main.py#L43
/// Token id the model emits to signal the end of audio generation.
const STOP_TOKEN_ID: u32 = 128258;
// Command line arguments for the Orpheus text-to-speech example.
// (Plain `//` comments on purpose: clap turns `///` into help text.)
#[derive(Parser)]
struct Args {
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,
    /// Display the token for the specified prompt.
    #[arg(long)]
    verbose_prompt: bool,
    #[arg(long, default_value = "Hey, how are you doing today?")]
    prompt: String,
    /// The temperature used to generate samples.
    #[arg(long, default_value_t = 0.6)]
    temperature: f64,
    /// Nucleus sampling probability cutoff.
    #[arg(long)]
    top_p: Option<f64>,
    /// Only sample among the top K samples.
    #[arg(long)]
    top_k: Option<usize>,
    /// The seed to use when generating random samples.
    #[arg(long, default_value_t = 299792458)]
    seed: u64,
    // Hub model id; defaults to the repo for the selected `--which` variant.
    #[arg(long)]
    model_id: Option<String>,
    // Hub revision; defaults to "main".
    #[arg(long)]
    revision: Option<String>,
    // Local overrides for the model / tokenizer / config files.
    #[arg(long)]
    model_file: Option<String>,
    #[arg(long)]
    tokenizer_file: Option<String>,
    #[arg(long)]
    config_file: Option<String>,
    /// The output wav file.
    #[arg(long, default_value = "out.wav")]
    out_file: String,
    #[arg(long, default_value = "3b-0.1-ft")]
    which: Which,
    #[arg(long, default_value = "tara")]
    voice: Voice,
    #[arg(long)]
    use_flash_attn: bool,
}
// Speaker voices supported by the Orpheus checkpoint; the selected voice
// name prefixes the TTS prompt.
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Voice {
    #[value(name = "tara")]
    Tara,
    #[value(name = "leah")]
    Leah,
    #[value(name = "jess")]
    Jess,
    #[value(name = "leo")]
    Leo,
    #[value(name = "dan")]
    Dan,
    #[value(name = "mia")]
    Mia,
    #[value(name = "zac")]
    Zac,
    #[value(name = "zoe")]
    Zoe,
}
impl Voice {
fn as_str(&self) -> &'static str {
match self {
Voice::Tara => "tara",
Voice::Leah => "leah",
Voice::Jess => "jess",
Voice::Leo => "leo",
Voice::Dan => "dan",
Voice::Mia => "mia",
Voice::Zac => "zac",
Voice::Zoe => "zoe",
}
}
}
// Model variants; only the 3b 0.1 fine-tuned checkpoint is wired up so far.
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
    #[value(name = "3b-0.1-ft")]
    ThreeB0_1Ft,
}
/// Entry point: parses the CLI arguments, loads the Orpheus TTS pipeline and
/// synthesizes the prompt into a wav file.
fn main() -> Result<()> {
    use tracing_chrome::ChromeLayerBuilder;
    use tracing_subscriber::prelude::*;
    let args = Args::parse();
    // Keep the guard alive for the whole run so the trace gets flushed.
    let _guard = if args.tracing {
        let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };
    println!(
        "avx: {}, neon: {}, simd128: {}, f16c: {}",
        candle::utils::with_avx(),
        candle::utils::with_neon(),
        candle::utils::with_simd128(),
        candle::utils::with_f16c()
    );
    // `Model::load` consumes args, so grab the prompt first.
    let prompt = args.prompt.clone();
    let mut model = Model::load(args)?;
    model.run(&prompt)?;
    Ok(())
}
/// Orpheus TTS pipeline: a Llama token generator followed by a SNAC audio
/// decoder.
struct Model {
    model: Llama,
    tokenizer: Tokenizer,
    logits_processor: candle_transformers::generation::LogitsProcessor,
    cache: Cache,
    device: Device,
    verbose_prompt: bool,
    // Decodes the generated audio codes into pcm samples.
    snac: SnacModel,
    out_file: String,
    voice: Voice,
}
/// Loads the 24kHz SNAC audio decoder: config from the upstream
/// `hubertsiuzdak/snac_24khz` repo, safetensors weights from
/// `lmz/candle-snac`.
fn load_snac(device: &Device) -> Result<SnacModel> {
    let api = hf_hub::api::sync::Api::new()?;
    let m = api.model("hubertsiuzdak/snac_24khz".to_string());
    let config = m.get("config.json")?;
    let config: SnacConfig = serde_json::from_reader(std::fs::File::open(config)?)?;
    let m = api.model("lmz/candle-snac".to_string());
    let model = m.get("snac_24khz.safetensors")?;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, device)? };
    let model = SnacModel::new(&config, vb)?;
    Ok(model)
}
impl Model {
    /// Downloads (or reuses) the model files, then builds the Llama model,
    /// tokenizer, sampler, KV cache and SNAC decoder.
    fn load(args: Args) -> Result<Self> {
        let start = std::time::Instant::now();
        let api = hf_hub::api::sync::Api::new()?;
        let model_id = match args.model_id {
            Some(model_id) => model_id.to_string(),
            None => match args.which {
                Which::ThreeB0_1Ft => "canopylabs/orpheus-3b-0.1-ft".to_string(),
            },
        };
        let revision = match args.revision {
            Some(r) => r,
            None => "main".to_string(),
        };
        let repo = api.repo(hf_hub::Repo::with_revision(
            model_id,
            hf_hub::RepoType::Model,
            revision,
        ));
        let model_files = match args.model_file {
            Some(m) => vec![m.into()],
            None => match args.which {
                Which::ThreeB0_1Ft => {
                    candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?
                }
            },
        };
        let config = match args.config_file {
            Some(m) => m.into(),
            None => repo.get("config.json")?,
        };
        let tokenizer = match args.tokenizer_file {
            Some(m) => m.into(),
            None => repo.get("tokenizer.json")?,
        };
        println!("retrieved the files in {:?}", start.elapsed());
        let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?;
        let start = std::time::Instant::now();
        let device = candle_examples::device(args.cpu)?;
        let dtype = device.bf16_default_to_f32();
        let vb = unsafe { VarBuilder::from_mmaped_safetensors(&model_files, dtype, &device)? };
        let config: LlamaConfig = serde_json::from_reader(std::fs::File::open(config)?)?;
        let config = config.into_config(args.use_flash_attn);
        let model = Llama::load(vb, &config)?;
        // temperature <= 0 falls back to greedy decoding; otherwise combine
        // top-k / top-p with temperature sampling as requested.
        let logits_processor = {
            use candle_transformers::generation::{LogitsProcessor, Sampling};
            let temperature = args.temperature;
            let sampling = if temperature <= 0. {
                Sampling::ArgMax
            } else {
                match (args.top_k.as_ref(), args.top_p.as_ref()) {
                    (None, None) => Sampling::All { temperature },
                    (Some(&k), None) => Sampling::TopK { k, temperature },
                    (None, Some(&p)) => Sampling::TopP { p, temperature },
                    (Some(&k), Some(&p)) => Sampling::TopKThenTopP { k, p, temperature },
                }
            };
            LogitsProcessor::from_sampling(args.seed, sampling)
        };
        println!("loaded the model in {:?}", start.elapsed());
        let cache = Cache::new(true, dtype, &config, &device)?;
        let snac = load_snac(&device)?;
        Ok(Self {
            model,
            tokenizer,
            logits_processor,
            cache,
            device,
            verbose_prompt: args.verbose_prompt,
            snac,
            voice: args.voice,
            out_file: args.out_file,
        })
    }
    /// Generates audio tokens for `prompt`, converts them into SNAC codes and
    /// writes the decoded pcm to `self.out_file` as a 24kHz wav.
    fn run(&mut self, prompt: &str) -> Result<()> {
        println!("running the model on '{prompt}'");
        let device = &self.device;
        let prompt = format!("{voice}: {prompt}", voice = self.voice.as_str());
        let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
        // Wrap the prompt in the special tokens expected by Orpheus, see:
        // https://github.com/canopyai/Orpheus-TTS/blob/df0b0d96685dd21885aef7f900ee7f705c669e94/orpheus_tts_pypi/orpheus_tts/engine_class.py#L82
        let mut tokens = [
            &[128259],
            tokens.get_ids(),
            &[128009, 128260, 128261, 128257],
        ]
        .concat();
        if self.verbose_prompt {
            println!("{tokens:?}");
        }
        let mut cache = self.cache.clone();
        println!("starting the inference loop");
        let mut index_pos = 0;
        let mut audio_tokens = vec![];
        for index in 0..2000 {
            // The full prompt is fed on the first step; afterwards only the
            // last generated token is fed thanks to the KV cache.
            let (context_size, context_index) = if index > 0 {
                (1, index_pos)
            } else {
                (tokens.len(), 0)
            };
            let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
            let input = Tensor::new(ctxt, device)?.unsqueeze(0)?;
            let logits = self.model.forward(&input, context_index, &mut cache)?;
            let logits = logits.squeeze(0)?;
            index_pos += ctxt.len();
            let next_token = self.logits_processor.sample(&logits)?;
            if let Some(tok) = self.tokenizer.id_to_token(next_token) {
                match tok.strip_prefix("<custom_token_") {
                    Some(tok) => match tok.strip_suffix('>') {
                        Some(tok) => {
                            let tok = tok.parse::<u32>()?;
                            // Map the custom token id back to a codebook entry:
                            // https://github.com/canopyai/Orpheus-TTS/blob/df0b0d96685dd21885aef7f900ee7f705c669e94/orpheus_tts_pypi/orpheus_tts/decoder.py#L86C35-L86C63
                            // NOTE(review): this subtraction underflows (and
                            // panics in debug builds) if the model emits an id
                            // below the expected range -- confirm that cannot
                            // happen.
                            let tok = tok - 10 - ((audio_tokens.len() as u32 % 7) * 4096);
                            audio_tokens.push(tok);
                        }
                        None => {
                            println!("{index}: unexpected custom token {next_token} {tok}");
                        }
                    },
                    None => {
                        println!("{index}: unexpected token {next_token} {tok}");
                    }
                }
            }
            if next_token == STOP_TOKEN_ID {
                println!("reached stop token");
                break;
            }
            tokens.push(next_token);
        }
        println!("generated {} audio tokens", audio_tokens.len());
        // SNAC expects three codebooks; each group of 7 generated tokens
        // contributes 1 code to the first book, 2 to the second, 4 to the third.
        let mut codes0 = vec![];
        let mut codes1 = vec![];
        let mut codes2 = vec![];
        for audio_tokens in audio_tokens.chunks_exact(7) {
            codes0.push(audio_tokens[0]);
            for i in [1, 4] {
                codes1.push(audio_tokens[i]);
            }
            for i in [2, 3, 5, 6] {
                codes2.push(audio_tokens[i]);
            }
        }
        let codes0 = Tensor::new(codes0, device)?.unsqueeze(0)?;
        let codes1 = Tensor::new(codes1, device)?.unsqueeze(0)?;
        let codes2 = Tensor::new(codes2, device)?.unsqueeze(0)?;
        let pcm = self.snac.decode(&[&codes0, &codes1, &codes2])?;
        println!("decoded to pcm {pcm:?}");
        let mut output = std::fs::File::create(&self.out_file)?;
        let pcm = pcm.i(0)?.i(0)?.to_vec1::<f32>()?;
        candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24000)?;
        Ok(())
    }
}
| candle/candle-examples/examples/orpheus/main.rs/0 | {
"file_path": "candle/candle-examples/examples/orpheus/main.rs",
"repo_id": "candle",
"token_count": 5468
} | 39 |
# candle-quantized-qwen3
[Qwen3](https://qwenlm.github.io/blog/qwen3/) is an upgraded version of Qwen2.5, released by Alibaba Cloud.
## Running the example
```bash
cargo run --example quantized-qwen3 --release -- --prompt "Write a function to count prime numbers up to N."
```
The 0.6b model is used by default; the 1.7b, 4b, 8b, 14b, and 32b models are available via the `--which` argument.
```bash
cargo run --example quantized-qwen3 --release -- --which 4b --prompt "A train is travelling at 120mph, how far does it travel in 3 minutes 30 seconds?"
```
| candle/candle-examples/examples/quantized-qwen3/README.md/0 | {
"file_path": "candle/candle-examples/examples/quantized-qwen3/README.md",
"repo_id": "candle",
"token_count": 186
} | 40 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::Result;
use clap::{Parser, Subcommand};
mod gym_env;
mod vec_gym_env;
mod ddpg;
mod dqn;
mod policy_gradient;
// Command line arguments: which reinforcement-learning algorithm to run.
#[derive(Parser)]
struct Args {
    #[command(subcommand)]
    command: Command,
}
// Available algorithms, one subcommand per training module.
#[derive(Subcommand)]
enum Command {
    // Policy gradient.
    Pg,
    // Deep deterministic policy gradient.
    Ddpg,
    // Deep Q-network.
    Dqn,
}
/// Entry point: dispatches to the selected reinforcement-learning algorithm.
fn main() -> Result<()> {
    let command = Args::parse().command;
    match command {
        Command::Pg => policy_gradient::run(),
        Command::Ddpg => ddpg::run(),
        Command::Dqn => dqn::run(),
    }?;
    Ok(())
}
| candle/candle-examples/examples/reinforcement-learning/main.rs/0 | {
"file_path": "candle/candle-examples/examples/reinforcement-learning/main.rs",
"repo_id": "candle",
"token_count": 277
} | 41 |
use anyhow::{Ok, Result};
use candle::{DType, IndexOp, Tensor};
use candle_transformers::models::flux;
use candle_transformers::models::mmdit::model::MMDiT;
/// Configuration for skip-layer guidance (SLG) during sampling.
pub struct SkipLayerGuidanceConfig {
    /// Strength of the skip-layer guidance term.
    pub scale: f64,
    /// Fraction of the schedule after which SLG becomes active.
    pub start: f64,
    /// Fraction of the schedule after which SLG is disabled again.
    pub end: f64,
    /// Indices of the MMDiT layers to skip for the SLG forward pass.
    pub layers: Vec<usize>,
}
/// Samples a latent image with the Euler method, applying classifier-free
/// guidance at every step and, optionally, skip-layer guidance over a
/// configured fraction of the schedule.
#[allow(clippy::too_many_arguments)]
pub fn euler_sample(
    mmdit: &MMDiT,
    y: &Tensor,
    context: &Tensor,
    num_inference_steps: usize,
    cfg_scale: f64,
    time_shift: f64,
    height: usize,
    width: usize,
    slg_config: Option<SkipLayerGuidanceConfig>,
) -> Result<Tensor> {
    let mut x = flux::sampling::get_noise(1, height, width, y.device())?.to_dtype(DType::F16)?;
    // Timesteps from 1.0 down to 0.0, warped by the resolution-dependent shift.
    let sigmas = (0..=num_inference_steps)
        .map(|x| x as f64 / num_inference_steps as f64)
        .rev()
        .map(|x| time_snr_shift(time_shift, x))
        .collect::<Vec<f64>>();
    for (step, window) in sigmas.windows(2).enumerate() {
        let (s_curr, s_prev) = match window {
            [a, b] => (a, b),
            _ => continue,
        };
        let timestep = (*s_curr) * 1000.0;
        // Batched cond/uncond forward pass for classifier-free guidance.
        let noise_pred = mmdit.forward(
            &Tensor::cat(&[&x, &x], 0)?,
            &Tensor::full(timestep as f32, (2,), x.device())?.contiguous()?,
            y,
            context,
            None,
        )?;
        let mut guidance = apply_cfg(cfg_scale, &noise_pred)?;
        if let Some(slg_config) = slg_config.as_ref() {
            // Skip-layer guidance is only applied inside its step window.
            if (num_inference_steps as f64) * slg_config.start < (step as f64)
                && (step as f64) < (num_inference_steps as f64) * slg_config.end
            {
                let slg_noise_pred = mmdit.forward(
                    &x,
                    &Tensor::full(timestep as f32, (1,), x.device())?.contiguous()?,
                    &y.i(..1)?,
                    &context.i(..1)?,
                    Some(&slg_config.layers),
                )?;
                guidance = (guidance
                    + (slg_config.scale * (noise_pred.i(..1)? - slg_noise_pred.i(..1))?)?)?;
            }
        }
        // Euler step: s_prev < s_curr, so this walks the noise level down.
        x = (x + (guidance * (*s_prev - *s_curr))?)?;
    }
    Ok(x)
}
// The "Resolution-dependent shifting of timestep schedules" recommended in the SD3 tech report paper
// https://arxiv.org/pdf/2403.03206
// Following the implementation in ComfyUI:
// https://github.com/comfyanonymous/ComfyUI/blob/3c60ecd7a83da43d694e26a77ca6b93106891251/
// comfy/model_sampling.py#L181
/// Resolution-dependent shifting of the timestep schedule: maps `t` in
/// [0, 1] through `alpha * t / (1 + (alpha - 1) * t)` (identity for
/// `alpha == 1`).
fn time_snr_shift(alpha: f64, t: f64) -> f64 {
    let numerator = alpha * t;
    let denominator = 1.0 + (alpha - 1.0) * t;
    numerator / denominator
}
/// Classifier-free guidance: combine row 0 (conditional) and row 1
/// (unconditional) of the batched prediction into a single guided
/// prediction, `cfg_scale * cond - (cfg_scale - 1) * uncond`.
fn apply_cfg(cfg_scale: f64, noise_pred: &Tensor) -> Result<Tensor> {
    let cond = noise_pred.narrow(0, 0, 1)?;
    let uncond = noise_pred.narrow(0, 1, 1)?;
    let scaled_cond = (cfg_scale * cond)?;
    let scaled_uncond = ((cfg_scale - 1.0) * uncond)?;
    Ok((scaled_cond - scaled_uncond)?)
}
| candle/candle-examples/examples/stable-diffusion-3/sampling.rs/0 | {
"file_path": "candle/candle-examples/examples/stable-diffusion-3/sampling.rs",
"repo_id": "candle",
"token_count": 1404
} | 42 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Error as E;
use clap::{Parser, ValueEnum};
use candle::{DType, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::models::{trocr, vit};
use tokenizers::Tokenizer;
mod image_processor;
// The TrOCR model variant to run. Plain `//` comments on purpose: `///`
// doc comments would be picked up by clap's ValueEnum derive and change the
// generated --help text.
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
    // Handwritten-text models.
    #[value(name = "base")]
    BaseHandwritten,
    #[value(name = "large")]
    LargeHandwritten,
    // Printed-text models.
    BasePrinted,
    LargePrinted,
}
impl Which {
    /// Hub repository id and git revision holding the safetensors weights for
    /// each variant. Several repos only carry them on a pull-request branch.
    fn repo_and_branch_name(&self) -> (&str, &str) {
        match self {
            Self::BaseHandwritten => ("microsoft/trocr-base-handwritten", "refs/pr/3"),
            Self::LargeHandwritten => ("microsoft/trocr-large-handwritten", "refs/pr/6"),
            Self::BasePrinted => ("microsoft/trocr-base-printed", "refs/pr/7"),
            Self::LargePrinted => ("microsoft/trocr-large-printed", "main"),
        }
    }
}
/// Subset of the hub `config.json` needed here: the ViT encoder and the TrOCR
/// decoder configurations.
#[derive(Debug, Clone, serde::Deserialize)]
struct Config {
    encoder: vit::Config,
    decoder: trocr::TrOCRConfig,
}
// Command-line arguments. New comments use `//` so clap's derived --help
// output is unchanged (the existing `///` lines are already part of it).
#[derive(Parser, Debug)]
struct Args {
    // Path to a local safetensors file; when absent the weights are fetched
    // from the hub repo selected by `--which`.
    #[arg(long)]
    model: Option<String>,
    /// Choose the variant of the model to run.
    #[arg(long, default_value = "base")]
    which: Which,
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
    /// The image file to be processed.
    #[arg(long)]
    image: String,
    /// Tokenization config.
    #[arg(long)]
    tokenizer: Option<String>,
}
/// Entry point: load the TrOCR encoder/decoder from the hub (or local files),
/// preprocess the input image, then autoregressively decode the recognized
/// text to stdout.
pub fn main() -> anyhow::Result<()> {
    let args = Args::parse();
    let api = hf_hub::api::sync::Api::new()?;
    // Tokenizer: either a local file or a community-provided tokenizer repo.
    let mut tokenizer_dec = {
        let tokenizer_file = match args.tokenizer {
            None => api
                .model(String::from("ToluClassics/candle-trocr-tokenizer"))
                .get("tokenizer.json")?,
            Some(tokenizer) => std::path::PathBuf::from(tokenizer),
        };
        let tokenizer = Tokenizer::from_file(&tokenizer_file).map_err(E::msg)?;
        TokenOutputStream::new(tokenizer)
    };
    let device = candle_examples::device(args.cpu)?;
    // Weights: local path or the hub repo/branch matching the chosen variant.
    let vb = {
        let model = match args.model {
            Some(model) => std::path::PathBuf::from(model),
            None => {
                let (repo, branch) = args.which.repo_and_branch_name();
                api.repo(hf_hub::Repo::with_revision(
                    repo.to_string(),
                    hf_hub::RepoType::Model,
                    branch.to_string(),
                ))
                .get("model.safetensors")?
            }
        };
        println!("model: {model:?}");
        // Safety: mmap of a trusted local safetensors file.
        unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }
    };
    // Fetch and split the model configuration into encoder/decoder parts.
    let (encoder_config, decoder_config) = {
        let (repo, branch) = args.which.repo_and_branch_name();
        let config_filename = api
            .repo(hf_hub::Repo::with_revision(
                repo.to_string(),
                hf_hub::RepoType::Model,
                branch.to_string(),
            ))
            .get("config.json")?;
        let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?;
        (config.encoder, config.decoder)
    };
    let mut model = trocr::TrOCRModel::new(&encoder_config, &decoder_config, vb)?;
    // Preprocess the image and run it through the ViT encoder once.
    let processor_config = image_processor::ProcessorConfig::default();
    let processor = image_processor::ViTImageProcessor::new(&processor_config);
    let image = vec![args.image.as_str()];
    let image = processor.preprocess(image)?.to_device(&device)?;
    let encoder_xs = model.encoder().forward(&image)?;
    // Greedy decoding (no top-k / top-p), fixed seed for reproducibility.
    let mut logits_processor =
        candle_transformers::generation::LogitsProcessor::new(1337, None, None);
    let mut token_ids: Vec<u32> = vec![decoder_config.decoder_start_token_id];
    for index in 0..1000 {
        // After the first step only the newly generated token is fed in
        // (the KV cache holds the rest).
        let context_size = if index >= 1 { 1 } else { token_ids.len() };
        let start_pos = token_ids.len().saturating_sub(context_size);
        let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?;
        let logits = model.decode(&input_ids, &encoder_xs, start_pos)?;
        let logits = logits.squeeze(0)?;
        let logits = logits.get(logits.dim(0)? - 1)?;
        let token = logits_processor.sample(&logits)?;
        token_ids.push(token);
        // Stream decoded text as soon as it is available.
        if let Some(t) = tokenizer_dec.next_token(token)? {
            use std::io::Write;
            print!("{t}");
            std::io::stdout().flush()?;
        }
        if token == decoder_config.eos_token_id {
            break;
        }
    }
    // Flush any residual bytes held back by the token output stream.
    if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? {
        print!("{rest}");
    }
    println!();
    Ok(())
}
| candle/candle-examples/examples/trocr/main.rs/0 | {
"file_path": "candle/candle-examples/examples/trocr/main.rs",
"repo_id": "candle",
"token_count": 2167
} | 43 |
// https://github.com/openai/whisper/blob/main/whisper/model.py
// TODO:
// - Batch size greater than 1.
// - More token filters (SuppressBlanks, ApplyTimestampRules).
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::{Error as E, Result};
use candle::{Device, IndexOp, Tensor};
use candle_nn::{
ops::{log_softmax, softmax},
VarBuilder,
};
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use rand::distr::weighted::WeightedIndex;
use rand::distr::Distribution;
use rand::SeedableRng;
use tokenizers::Tokenizer;
mod multilingual;
use candle_transformers::models::whisper::{self as m, audio, Config};
/// A Whisper model, either in full precision or GGUF-quantized form.
pub enum Model {
    Normal(m::model::Whisper),
    Quantized(m::quantized_model::Whisper),
}
// Maybe we should use some traits rather than doing the dispatch for all these.
impl Model {
    /// The model configuration shared by both representations.
    pub fn config(&self) -> &Config {
        match self {
            Self::Normal(m) => &m.config,
            Self::Quantized(m) => &m.config,
        }
    }
    /// Run the audio encoder; `flush` controls whether cached state is reset.
    pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> {
        match self {
            Self::Normal(m) => m.encoder.forward(x, flush),
            Self::Quantized(m) => m.encoder.forward(x, flush),
        }
    }
    /// Run the text decoder over tokens `x` with audio features `xa`.
    pub fn decoder_forward(
        &mut self,
        x: &Tensor,
        xa: &Tensor,
        flush: bool,
    ) -> candle::Result<Tensor> {
        match self {
            Self::Normal(m) => m.decoder.forward(x, xa, flush),
            Self::Quantized(m) => m.decoder.forward(x, xa, flush),
        }
    }
    /// Project decoder hidden states to vocabulary logits.
    pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> {
        match self {
            Self::Normal(m) => m.decoder.final_linear(x),
            Self::Quantized(m) => m.decoder.final_linear(x),
        }
    }
}
/// Outcome of decoding one 30s mel segment at a given temperature.
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct DecodingResult {
    tokens: Vec<u32>,
    text: String,
    /// Mean log-probability over the sampled tokens; used by the fallback logic.
    avg_logprob: f64,
    /// Probability of the no-speech token on the first decoding step.
    no_speech_prob: f64,
    temperature: f64,
    /// Not computed here (always NaN); kept for parity with the reference impl.
    compression_ratio: f64,
}
/// One transcribed audio segment with its position in the input (seconds).
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct Segment {
    start: f64,
    duration: f64,
    dr: DecodingResult,
}
/// Drives the autoregressive transcription loop: owns the model, tokenizer,
/// sampling RNG and the special-token ids resolved at construction time.
struct Decoder {
    model: Model,
    rng: rand::rngs::StdRng,
    task: Option<Task>,
    /// When true, timestamp tokens are decoded and printed per sub-segment.
    timestamps: bool,
    /// Optional cap on the first timestamp token (in timestamp-index units).
    max_initial_timestamp_index: Option<u32>,
    verbose: bool,
    tokenizer: Tokenizer,
    /// Additive logit mask (-inf for suppressed token ids, 0 elsewhere).
    suppress_tokens: Tensor,
    sot_token: u32,
    transcribe_token: u32,
    translate_token: u32,
    eot_token: u32,
    no_speech_token: u32,
    no_timestamps_token: u32,
    /// Language token for multilingual models, `None` for English-only ones.
    language_token: Option<u32>,
}
impl Decoder {
#[allow(clippy::too_many_arguments)]
/// Build a decoder: resolve all special-token ids from the tokenizer and
/// precompute the additive suppression mask over the vocabulary.
fn new(
    model: Model,
    tokenizer: Tokenizer,
    seed: u64,
    device: &Device,
    language_token: Option<u32>,
    task: Option<Task>,
    timestamps: bool,
    max_initial_timestamp_index: Option<u32>,
    verbose: bool,
) -> Result<Self> {
    let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?;
    // Suppress the notimestamps token when in timestamps mode.
    // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452
    let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32)
        .map(|i| {
            if model.config().suppress_tokens.contains(&i)
                || timestamps && i == no_timestamps_token
            {
                f32::NEG_INFINITY
            } else {
                0f32
            }
        })
        .collect();
    let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?;
    let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?;
    let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?;
    let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?;
    let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?;
    // The no-speech token name differs across model releases; take the first
    // candidate this tokenizer actually knows about.
    let no_speech_token = m::NO_SPEECH_TOKENS
        .iter()
        .find_map(|token| token_id(&tokenizer, token).ok());
    let no_speech_token = match no_speech_token {
        None => anyhow::bail!("unable to find any non-speech token"),
        Some(n) => n,
    };
    Ok(Self {
        model,
        rng: rand::rngs::StdRng::seed_from_u64(seed),
        tokenizer,
        task,
        timestamps,
        max_initial_timestamp_index,
        verbose,
        suppress_tokens,
        sot_token,
        transcribe_token,
        translate_token,
        eot_token,
        no_speech_token,
        language_token,
    })
}
/// Decode one mel segment at temperature `t`: greedy argmax when `t == 0`,
/// multinomial sampling otherwise. Returns the tokens/text plus the
/// statistics (avg logprob, no-speech probability) used by the fallback.
fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> {
    let audio_features = self.model.encoder_forward(mel, true)?;
    if self.verbose {
        println!("audio features: {:?}", audio_features.dims());
    }
    let sample_len = self.model.config().max_target_positions / 2;
    let mut sum_logprob = 0f64;
    let mut no_speech_prob = f64::NAN;
    // Prompt layout: SOT, [language], task, [notimestamps].
    let mut tokens = vec![self.sot_token];
    if let Some(language_token) = self.language_token {
        tokens.push(language_token);
    }
    match self.task {
        None | Some(Task::Transcribe) => tokens.push(self.transcribe_token),
        Some(Task::Translate) => tokens.push(self.translate_token),
    }
    if !self.timestamps {
        tokens.push(self.no_timestamps_token);
    }
    for i in 0..sample_len {
        let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?;
        // The model expects a batch dim but this inference loop does not handle
        // it so we add it at this point.
        let tokens_t = tokens_t.unsqueeze(0)?;
        let ys = self
            .model
            .decoder_forward(&tokens_t, &audio_features, i == 0)?;
        // Extract the no speech probability on the first iteration by looking at the first
        // token logits and the probability for the according token.
        if i == 0 {
            let logits = self.model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
            no_speech_prob = softmax(&logits, 0)?
                .i(self.no_speech_token as usize)?
                .to_scalar::<f32>()? as f64;
        }
        // Only the last position's logits are needed for the next token.
        let (_, seq_len, _) = ys.dims3()?;
        let logits = self
            .model
            .decoder_final_linear(&ys.i((..1, seq_len - 1..))?)?
            .i(0)?
            .i(0)?;
        // Apply timestamp rules when timestamps are enabled
        let logits = if self.timestamps {
            self.apply_timestamp_rules(&logits, &tokens)?
        } else {
            logits
        };
        // Mask out suppressed tokens (additive -inf mask built in `new`).
        let logits = logits.broadcast_add(&self.suppress_tokens)?;
        let next_token = if t > 0f64 {
            // Temperature sampling from the softmax distribution.
            let prs = softmax(&(&logits / t)?, 0)?;
            let logits_v: Vec<f32> = prs.to_vec1()?;
            let distr = WeightedIndex::new(&logits_v)?;
            distr.sample(&mut self.rng) as u32
        } else {
            // Greedy argmax.
            let logits_v: Vec<f32> = logits.to_vec1()?;
            logits_v
                .iter()
                .enumerate()
                .max_by(|(_, u), (_, v)| u.total_cmp(v))
                .map(|(i, _)| i as u32)
                .unwrap()
        };
        tokens.push(next_token);
        let prob = softmax(&logits, candle::D::Minus1)?
            .i(next_token as usize)?
            .to_scalar::<f32>()? as f64;
        // Stop on EOT or when the context limit is exceeded; the terminal
        // token's logprob is intentionally not accumulated.
        if next_token == self.eot_token
            || tokens.len() > self.model.config().max_target_positions
        {
            break;
        }
        sum_logprob += prob.ln();
    }
    let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?;
    let avg_logprob = sum_logprob / tokens.len() as f64;
    Ok(DecodingResult {
        tokens,
        text,
        avg_logprob,
        no_speech_prob,
        temperature: t,
        compression_ratio: f64::NAN,
    })
}
/// Decode a segment, retrying at increasing temperatures when the result
/// looks degenerate (low avg logprob or high compression ratio), mirroring
/// the reference Whisper fallback strategy. The last temperature's result
/// (or error) is returned unconditionally.
fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> {
    for (i, &t) in m::TEMPERATURES.iter().enumerate() {
        let dr: Result<DecodingResult> = self.decode(segment, t);
        if i == m::TEMPERATURES.len() - 1 {
            return dr;
        }
        // On errors, we try again with a different temperature.
        match dr {
            Ok(dr) => {
                let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD
                    || dr.avg_logprob < m::LOGPROB_THRESHOLD;
                if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD {
                    return Ok(dr);
                }
            }
            Err(err) => {
                println!("Error running at {t}: {err}")
            }
        }
    }
    // The loop always returns on the last temperature above.
    unreachable!()
}
/// Constrain the next-token logits so that generated timestamp tokens are
/// well-formed, following OpenAI's `ApplyTimestampRules` filter: timestamps
/// come in pairs, never decrease, must open the transcript, and are forced
/// when their total probability mass beats every text token.
fn apply_timestamp_rules(&self, input_logits: &Tensor, tokens: &[u32]) -> Result<Tensor> {
    let device = input_logits.device().clone();
    // Timestamp token ids occupy the tail of the vocabulary, starting right
    // after the notimestamps token.
    let timestamp_begin = self.no_timestamps_token + 1;
    let vocab_size = self.model.config().vocab_size as u32;
    // ========== SETUP: Extract sampled tokens for analysis ==========
    // The prompt is SOT [language] task; everything after it was sampled.
    let sample_begin = if self.language_token.is_some() { 3 } else { 2 };
    let sampled_tokens = if tokens.len() > sample_begin {
        &tokens[sample_begin..]
    } else {
        &[]
    };
    let mut masks = Vec::new();
    // Pre-allocate reusable mask buffer to avoid repeated allocations
    let mut mask_buffer = vec![0.0f32; vocab_size as usize];
    // ========== RULE 1: Timestamp pairing constraints ==========
    // Timestamps must come in pairs, except directly before EOT
    if !sampled_tokens.is_empty() {
        let last_was_timestamp = sampled_tokens
            .last()
            .map(|&t| t >= timestamp_begin)
            .unwrap_or(false);
        let penultimate_was_timestamp = if sampled_tokens.len() >= 2 {
            sampled_tokens[sampled_tokens.len() - 2] >= timestamp_begin
        } else {
            false
        };
        if last_was_timestamp {
            if penultimate_was_timestamp {
                // Has to be non-timestamp - suppress timestamp tokens
                for i in 0..vocab_size {
                    mask_buffer[i as usize] = if i >= timestamp_begin {
                        f32::NEG_INFINITY
                    } else {
                        0.0
                    };
                }
                masks.push(Tensor::new(mask_buffer.as_slice(), &device)?);
            } else {
                // Cannot be normal text tokens - suppress everything before EOT
                for i in 0..vocab_size {
                    mask_buffer[i as usize] = if i < self.eot_token {
                        f32::NEG_INFINITY
                    } else {
                        0.0
                    };
                }
                masks.push(Tensor::new(mask_buffer.as_slice(), &device)?);
            }
        }
        // ========== RULE 2: Non-decreasing timestamp constraint ==========
        // Timestamps shouldn't decrease; forbid timestamp tokens smaller than the last
        let timestamp_tokens: Vec<u32> = sampled_tokens
            .iter()
            .filter(|&&t| t >= timestamp_begin)
            .cloned()
            .collect();
        if !timestamp_tokens.is_empty() {
            // If a segment was just opened the same timestamp may repeat;
            // otherwise the next one must be strictly larger.
            let timestamp_last = if last_was_timestamp && !penultimate_was_timestamp {
                *timestamp_tokens.last().unwrap()
            } else {
                timestamp_tokens.last().unwrap() + 1
            };
            for i in 0..vocab_size {
                mask_buffer[i as usize] = if i >= timestamp_begin && i < timestamp_last {
                    f32::NEG_INFINITY
                } else {
                    0.0
                };
            }
            masks.push(Tensor::new(mask_buffer.as_slice(), &device)?);
        }
    }
    // ========== RULE 3: Force initial timestamp ==========
    // At the beginning, suppress generating non-timestamp tokens
    if tokens.len() == sample_begin {
        for i in 0..vocab_size {
            mask_buffer[i as usize] = if i < timestamp_begin {
                f32::NEG_INFINITY
            } else {
                0.0
            };
        }
        masks.push(Tensor::new(mask_buffer.as_slice(), &device)?);
        // Apply the max_initial_timestamp constraint
        if let Some(max_initial_timestamp_index) = self.max_initial_timestamp_index {
            let last_allowed = timestamp_begin + max_initial_timestamp_index;
            if last_allowed < vocab_size {
                for i in 0..vocab_size {
                    mask_buffer[i as usize] = if i > last_allowed {
                        f32::NEG_INFINITY
                    } else {
                        0.0
                    };
                }
                masks.push(Tensor::new(mask_buffer.as_slice(), &device)?);
            }
        }
    }
    // ========== APPLY MASKS: Apply all constraint masks ==========
    let mut logits = input_logits.clone();
    for mask in masks {
        logits = logits.broadcast_add(&mask)?;
    }
    // ========== RULE 4: Probability-based timestamp preference ==========
    // If sum of probability over timestamps is above any other token, sample timestamp
    let log_probs = log_softmax(&logits, 0)?;
    // Extract timestamp and text log probabilities
    let timestamp_log_probs = log_probs.narrow(
        0,
        timestamp_begin as usize,
        vocab_size as usize - timestamp_begin as usize,
    )?;
    let text_log_probs = log_probs.narrow(0, 0, timestamp_begin as usize)?;
    // Implement logsumexp for timestamp tokens (numerically stable)
    let timestamp_logprob = {
        let max_val = timestamp_log_probs.max(0)?;
        let shifted = timestamp_log_probs.broadcast_sub(&max_val)?;
        let exp_shifted = shifted.exp()?;
        let sum_exp = exp_shifted.sum(0)?;
        let log_sum = sum_exp.log()?;
        max_val.broadcast_add(&log_sum)?.to_scalar::<f32>()?
    };
    // Get max text token log probability
    let max_text_token_logprob: f32 = text_log_probs.max(0)?.to_scalar::<f32>()?;
    // Compare in log space
    if timestamp_logprob > max_text_token_logprob {
        // Only consider timestamp tokens
        for i in 0..vocab_size {
            mask_buffer[i as usize] = if i < timestamp_begin {
                f32::NEG_INFINITY
            } else {
                0.0
            };
        }
        let mask_tensor = Tensor::new(mask_buffer.as_slice(), &device)?;
        logits = logits.broadcast_add(&mask_tensor)?;
    }
    Ok(logits)
}
/// Transcribe the full mel spectrogram: walk it in up-to-30s windows,
/// decode each with temperature fallback, print the text (optionally with
/// per-timestamp sub-segments), and collect the resulting `Segment`s.
fn run(&mut self, mel: &Tensor) -> Result<Vec<Segment>> {
    let (_, _, content_frames) = mel.dims3()?;
    let mut seek = 0;
    let mut segments = vec![];
    while seek < content_frames {
        let start = std::time::Instant::now();
        // Convert frame offsets to seconds via hop length / sample rate.
        let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
        let segment_size = usize::min(content_frames - seek, m::N_FRAMES);
        let mel_segment = mel.narrow(2, seek, segment_size)?;
        let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
        let dr = self.decode_with_fallback(&mel_segment)?;
        seek += segment_size;
        // Skip windows the model considers silent (but only when the text
        // quality is also low).
        if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD {
            println!("no speech detected, skipping {seek} {dr:?}");
            continue;
        }
        let segment = Segment {
            start: time_offset,
            duration: segment_duration,
            dr,
        };
        if self.timestamps {
            println!(
                "{:.1}s -- {:.1}s",
                segment.start,
                segment.start + segment.duration,
            );
            // Split the token stream at timestamp tokens and print each
            // sub-range with its time span.
            let mut tokens_to_decode = vec![];
            let mut prev_timestamp_s = 0f32;
            for &token in segment.dr.tokens.iter() {
                if token == self.sot_token || token == self.eot_token {
                    continue;
                }
                // The no_timestamp_token is the last before the timestamp ones.
                if token > self.no_timestamps_token {
                    // Timestamp tokens tick in 0.02s steps.
                    // NOTE(review): timestamp ids start at no_timestamps_token + 1,
                    // so `token - self.no_timestamps_token + 1` looks shifted by
                    // two ticks (0.04s) relative to `token - timestamp_begin`;
                    // confirm against the reference implementation.
                    let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.;
                    if !tokens_to_decode.is_empty() {
                        let text = self
                            .tokenizer
                            .decode(&tokens_to_decode, true)
                            .map_err(E::msg)?;
                        println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text);
                        tokens_to_decode.clear()
                    }
                    prev_timestamp_s = timestamp_s;
                } else {
                    tokens_to_decode.push(token)
                }
            }
            // Trailing text after the last timestamp token.
            if !tokens_to_decode.is_empty() {
                let text = self
                    .tokenizer
                    .decode(&tokens_to_decode, true)
                    .map_err(E::msg)?;
                if !text.is_empty() {
                    println!(" {:.1}s-...: {}", prev_timestamp_s, text);
                }
                tokens_to_decode.clear()
            }
        } else {
            println!(
                "{:.1}s -- {:.1}s: {}",
                segment.start,
                segment.start + segment.duration,
                segment.dr.text,
            )
        }
        if self.verbose {
            println!("{seek}: {segment:?}, in {:?}", start.elapsed());
        }
        segments.push(segment)
    }
    Ok(segments)
}
}
/// Look up the id of `token` in the tokenizer's vocabulary, failing with a
/// descriptive error when the token is unknown.
pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> {
    if let Some(id) = tokenizer.token_to_id(token) {
        Ok(id)
    } else {
        candle::bail!("no token-id for {token}")
    }
}
// The Whisper task: transcribe in the source language or translate to
// English. (`//` comments on purpose: `///` would alter clap's --help.)
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Task {
    Transcribe,
    Translate,
}
// The Whisper checkpoint to run; `.en` variants are English-only, `distil-*`
// are the distil-whisper releases. (`//` comments so clap's --help output is
// unchanged.)
#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)]
enum WhichModel {
    Tiny,
    #[value(name = "tiny.en")]
    TinyEn,
    Base,
    #[value(name = "base.en")]
    BaseEn,
    Small,
    #[value(name = "small.en")]
    SmallEn,
    Medium,
    #[value(name = "medium.en")]
    MediumEn,
    Large,
    LargeV2,
    LargeV3,
    LargeV3Turbo,
    #[value(name = "distil-medium.en")]
    DistilMediumEn,
    #[value(name = "distil-large-v2")]
    DistilLargeV2,
    #[value(name = "distil-large-v3")]
    DistilLargeV3,
}
impl WhichModel {
    /// Whether the checkpoint supports language detection / non-English
    /// audio (the `.en` and distil-medium variants are English-only).
    fn is_multilingual(&self) -> bool {
        match self {
            Self::Tiny
            | Self::Base
            | Self::Small
            | Self::Medium
            | Self::Large
            | Self::LargeV2
            | Self::LargeV3
            | Self::LargeV3Turbo
            | Self::DistilLargeV2
            | Self::DistilLargeV3 => true,
            Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => {
                false
            }
        }
    }
    /// Hub repository id and git revision for each checkpoint; several repos
    /// only carry safetensors weights on a pull-request branch.
    fn model_and_revision(&self) -> (&'static str, &'static str) {
        match self {
            Self::Tiny => ("openai/whisper-tiny", "main"),
            Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"),
            Self::Base => ("openai/whisper-base", "refs/pr/22"),
            Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"),
            Self::Small => ("openai/whisper-small", "main"),
            Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"),
            Self::Medium => ("openai/whisper-medium", "main"),
            Self::MediumEn => ("openai/whisper-medium.en", "main"),
            Self::Large => ("openai/whisper-large", "refs/pr/36"),
            Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"),
            Self::LargeV3 => ("openai/whisper-large-v3", "main"),
            Self::LargeV3Turbo => ("openai/whisper-large-v3-turbo", "main"),
            Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"),
            Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"),
            Self::DistilLargeV3 => ("distil-whisper/distil-large-v3", "main"),
        }
    }
}
// Command-line arguments. New comments use `//` so clap's derived --help
// output is unchanged (the existing `///` lines are already part of it).
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,
    // Override the hub repository to fetch the model from.
    #[arg(long)]
    model_id: Option<String>,
    /// The model to use, check out available models:
    /// https://huggingface.co/models?search=whisper
    #[arg(long)]
    revision: Option<String>,
    /// The model to be used, can be tiny, small, medium.
    #[arg(long, default_value = "tiny.en")]
    model: WhichModel,
    /// The input to be processed, in wav format, will default to `jfk.wav`. Alternatively
    /// this can be set to sample:jfk, sample:gb1, ... to fetch a sample from the following
    /// repo: https://huggingface.co/datasets/Narsil/candle_demo/
    #[arg(long)]
    input: Option<String>,
    /// The seed to use when generating random samples.
    #[arg(long, default_value_t = 299792458)]
    seed: u64,
    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,
    // Use a GGUF-quantized checkpoint (only tiny / tiny.en are available).
    #[arg(long)]
    quantized: bool,
    /// Language.
    #[arg(long)]
    language: Option<String>,
    /// Task, when no task is specified, the input tokens contain only the sot token which can
    /// improve things when in no-timestamp mode.
    #[arg(long)]
    task: Option<Task>,
    /// Timestamps mode.
    #[arg(long, default_value_t = true)]
    timestamps: bool,
    /// Maximum initial timestamp index to consider.
    #[arg(long)]
    max_initial_timestamp_index: Option<u32>,
    /// Print the full DecodingResult structure rather than just the text.
    #[arg(long)]
    verbose: bool,
}
/// Entry point: fetch model/tokenizer/audio, build the mel spectrogram,
/// optionally detect the language, then run the transcription loop.
fn main() -> Result<()> {
    use tracing_chrome::ChromeLayerBuilder;
    use tracing_subscriber::prelude::*;
    let args = Args::parse();
    // The guard must stay alive for the duration of the program so the
    // chrome trace file gets flushed on drop.
    let _guard = if args.tracing {
        let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };
    let device = candle_examples::device(args.cpu)?;
    // Quantized weights live in a dedicated repo; otherwise pick the repo
    // matching the requested checkpoint.
    let (default_model, default_revision) = if args.quantized {
        ("lmz/candle-whisper", "main")
    } else {
        args.model.model_and_revision()
    };
    let default_model = default_model.to_string();
    let default_revision = default_revision.to_string();
    // CLI overrides take precedence over the defaults above.
    let (model_id, revision) = match (args.model_id, args.revision) {
        (Some(model_id), Some(revision)) => (model_id, revision),
        (Some(model_id), None) => (model_id, "main".to_string()),
        (None, Some(revision)) => (default_model, revision),
        (None, None) => (default_model, default_revision),
    };
    let (config_filename, tokenizer_filename, weights_filename, input) = {
        let api = Api::new()?;
        let dataset = api.dataset("Narsil/candle-examples".to_string());
        let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
        // Input audio: a local path, a named sample from the demo dataset,
        // or the default JFK clip.
        let sample = if let Some(input) = args.input {
            if let Some(sample) = input.strip_prefix("sample:") {
                dataset.get(&format!("samples_{sample}.wav"))?
            } else {
                std::path::PathBuf::from(input)
            }
        } else {
            println!("No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav");
            dataset.get("samples_jfk.wav")?
        };
        let (config, tokenizer, model) = if args.quantized {
            let ext = match args.model {
                WhichModel::TinyEn => "tiny-en",
                WhichModel::Tiny => "tiny",
                _ => unimplemented!("no quantized support for {:?}", args.model),
            };
            (
                repo.get(&format!("config-{ext}.json"))?,
                repo.get(&format!("tokenizer-{ext}.json"))?,
                repo.get(&format!("model-{ext}-q80.gguf"))?,
            )
        } else {
            let config = repo.get("config.json")?;
            let tokenizer = repo.get("tokenizer.json")?;
            let model = repo.get("model.safetensors")?;
            (config, tokenizer, model)
        };
        (config, tokenizer, model, sample)
    };
    let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?;
    let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
    // Mel filterbank matrix shipped as raw little-endian f32 bytes, picked
    // by the model's mel-bin count.
    let mel_bytes = match config.num_mel_bins {
        80 => include_bytes!("melfilters.bytes").as_slice(),
        128 => include_bytes!("melfilters128.bytes").as_slice(),
        nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"),
    };
    let mut mel_filters = vec![0f32; mel_bytes.len() / 4];
    <byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters);
    let (pcm_data, sample_rate) = candle_examples::audio::pcm_decode(input)?;
    if sample_rate != m::SAMPLE_RATE as u32 {
        anyhow::bail!("input file must have a {} sampling rate", m::SAMPLE_RATE)
    }
    println!("pcm data loaded {}", pcm_data.len());
    let mel = audio::pcm_to_mel(&config, &pcm_data, &mel_filters);
    let mel_len = mel.len();
    // Shape: (batch=1, num_mel_bins, n_frames).
    let mel = Tensor::from_vec(
        mel,
        (1, config.num_mel_bins, mel_len / config.num_mel_bins),
        &device,
    )?;
    println!("loaded mel: {:?}", mel.dims());
    let mut model = if args.quantized {
        let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
            &weights_filename,
            &device,
        )?;
        Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?)
    } else {
        // Safety: mmap of a trusted local safetensors file.
        let vb =
            unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? };
        Model::Normal(m::model::Whisper::load(&vb, config)?)
    };
    // Resolve the language token: auto-detect for multilingual models unless
    // a language is given explicitly; English-only models take none.
    let language_token = match (args.model.is_multilingual(), args.language) {
        (true, None) => Some(multilingual::detect_language(&mut model, &tokenizer, &mel)?),
        (false, None) => None,
        (true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) {
            Ok(token_id) => Some(token_id),
            Err(_) => anyhow::bail!("language {language} is not supported"),
        },
        (false, Some(_)) => {
            anyhow::bail!("a language cannot be set for non-multilingual models")
        }
    };
    let mut dc = Decoder::new(
        model,
        tokenizer,
        args.seed,
        &device,
        language_token,
        args.task,
        args.timestamps,
        args.max_initial_timestamp_index,
        args.verbose,
    )?;
    dc.run(&mel)?;
    Ok(())
}
| candle/candle-examples/examples/whisper/main.rs/0 | {
"file_path": "candle/candle-examples/examples/whisper/main.rs",
"repo_id": "candle",
"token_count": 14388
} | 44 |
# candle-yolo-v8: Object Detection and Pose Estimation
This is a port of [Ultralytics
YOLOv8](https://github.com/ultralytics/ultralytics). The implementation is based
on the [tinygrad
version](https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py)
and on the model architecture described in this
[issue](https://github.com/ultralytics/ultralytics/issues/189). The supported
tasks are object detection and pose estimation.
You can try this model online on the [Candle YOLOv8
Space](https://huggingface.co/spaces/lmz/candle-yolo). The model then fully runs
in your browser using WebAssembly - if you use a custom image it will never
leave your phone/computer!
## Running some examples
### Object Detection
```bash
cargo run --example yolo-v8 --release -- candle-examples/examples/yolo-v8/assets/bike.jpg
```
This prints details about the detected objects and generates a `bike.pp.jpg` file.

Image source:
[wikimedia](https://commons.wikimedia.org/wiki/File:Leading_group,_Giro_d%27Italia_2021,_Stage_15.jpg).

### Pose Estimation
```bash
cargo run --example yolo-v8 --release -- \
candle-examples/examples/yolo-v8/assets/bike.jpg --task pose
```

### Command-line flags
- `--which`: select the model variant to be used, `n`, `s` , `m`, `l`, or `x` by
increasing size and quality.
- `--task`: `detect` for object detection and `pose` for pose estimation.
- `--legend-size`: the size of the characters to print.
- `--model`: use a local model file rather than downloading it from the hub.
| candle/candle-examples/examples/yolo-v8/README.md/0 | {
"file_path": "candle/candle-examples/examples/yolo-v8/README.md",
"repo_id": "candle",
"token_count": 562
} | 45 |
# candle-flash-attn
| candle/candle-flash-attn/README.md/0 | {
"file_path": "candle/candle-flash-attn/README.md",
"repo_id": "candle",
"token_count": 8
} | 46 |
// Pytorch also has an implementation of Philox RNG: https://github.com/pytorch/pytorch/blob/8ca3c881db3e3510fcb7725389f6a0633c9b992c/torch/csrc/jit/tensorexpr/cuda_random.h
#pragma once
// Philox CUDA.
namespace flash {
// Pair of 64-bit values, used to reinterpret a uint4 counter as two u64s.
struct ull2 {
    unsigned long long x;
    unsigned long long y;
};
// 32x32 -> 64-bit multiply via inline PTX (mul.wide.u32); the 64-bit product
// is reinterpreted as a uint2, so on a little-endian layout .x holds the low
// 32 bits and .y the high 32 bits.
__forceinline__ __device__ uint2 mulhilo32(const unsigned int a, const unsigned int b) {
    uint2 *res;
    unsigned long long tmp;
    asm ("mul.wide.u32 %0, %1, %2;\n\t"
         : "=l"(tmp)
         : "r"(a), "r"(b));
    res = (uint2*)(&tmp);
    return *res;
}
// One Philox4x32 round: two wide multiplies of the counter words with the
// round constants, then xor-mixing with the counter and key.
__forceinline__ __device__ uint4 philox_single_round(const uint4 ctr, const uint2 key) {
    // Philox4x32 round multipliers.
    constexpr unsigned long kPhiloxSA = 0xD2511F53;
    constexpr unsigned long kPhiloxSB = 0xCD9E8D57;
    uint2 res0 = mulhilo32(kPhiloxSA, ctr.x);
    uint2 res1 = mulhilo32(kPhiloxSB, ctr.z);
    uint4 ret = {res1.y ^ ctr.y ^ key.x, res1.x, res0.y ^ ctr.w ^ key.y, res0.x};
    return ret;
}
// Philox4x32 counter-based RNG: `seed` supplies the key, and
// `subsequence`/`offset` initialize the 128-bit counter. Runs 6 keyed rounds
// in the loop (bumping the key by the Weyl constants each iteration) plus one
// final round, and returns 4x32 bits of output.
__forceinline__ __device__ uint4 philox(unsigned long long seed,
                                        unsigned long long subsequence,
                                        unsigned long long offset) {
    // Weyl sequence increments for the two key halves.
    constexpr unsigned long kPhilox10A = 0x9E3779B9;
    constexpr unsigned long kPhilox10B = 0xBB67AE85;
    uint2 key = reinterpret_cast<uint2&>(seed);
    uint4 counter;
    // Pack the two 64-bit inputs into the 128-bit counter.
    ull2 *tmp = reinterpret_cast<ull2*>(&counter);
    tmp->x = offset;
    tmp->y = subsequence;
    #pragma unroll
    for (int i = 0; i < 6; i++) {
        counter = philox_single_round(counter, key);
        key.x += (kPhilox10A);
        key.y += (kPhilox10B);
    }
    uint4 output = philox_single_round(counter, key);
    return output;
}
} // namespace flash
| candle/candle-flash-attn/kernels/philox.cuh/0 | {
"file_path": "candle/candle-flash-attn/kernels/philox.cuh",
"repo_id": "candle",
"token_count": 770
} | 47 |
#include "cuda_utils.cuh"
#include<stdint.h>
// Naive implementation of conv1d: one thread per output element, iterating
// over the kernel taps and input channels.
//
// `info` packs [src dims (3), src strides (3), kernel dims (3), kernel
// strides (3)]. `A` is the accumulation type (e.g. float for half inputs).
template <typename T, typename A>
__device__ void conv1d(
    const size_t src_numel,
    const size_t l_out,
    const size_t stride,
    const size_t padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    const T *kernel,
    T *dst
) {
  // src: (b_size, c_in, l_in)
  // k: (c_out, c_in, k_size)
  const size_t *src_dims = info;
  const size_t *src_s = info + 3;
  const size_t *k_dims = info + 6;
  const size_t *k_s = info + 9;
  const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t k_size = k_dims[2];
  const size_t c_out = k_dims[0];
  const size_t c_in = src_dims[1];
  const size_t l_in = src_dims[2];
  if (dst_i >= src_dims[0] * c_out * l_out) {
    return;
  }
  // Decompose the flat output index into (batch, out-channel, out-position).
  const size_t b_idx = dst_i / (l_out * c_out);
  const size_t dst_c_idx = (dst_i / l_out) % c_out;
  const size_t dst_l = dst_i % l_out;
  const size_t src_idx0 = b_idx * src_s[0];
  A d = 0;
  for (size_t offset = 0; offset < k_size; ++offset) {
    // Position in the (virtually) padded input. Only the kernel offset is
    // scaled by the dilation; the previous `(stride * dst_l + offset) *
    // dilation` also scaled the stride term, giving wrong indices whenever
    // dilation > 1. This now matches the indexing used by im2col1d below.
    size_t src_l = stride * dst_l + offset * dilation;
    if (src_l < padding || src_l >= padding + l_in) {
      // Tap falls in the zero padding: contributes nothing.
      continue;
    }
    src_l -= padding;
    for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
      const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_l * src_s[2];
      const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + offset * k_s[2];
      d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
    }
  }
  dst[dst_i] = static_cast<T>(d);
}
// 1D im2col: expand the input into a (b_size, l_out, c_in, l_k) matrix so the
// convolution reduces to a matmul. One thread per (batch, out-pos, channel)
// triple; each thread writes the l_k kernel-tap values, substituting zeros
// where a tap lands in the padding.
template <typename T>
__device__ void im2col1d(
    const size_t numel,
    const size_t l_out,
    const size_t l_k,
    const size_t stride,
    const size_t padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    T *dst
) {
  const size_t thread_i = blockIdx.x * blockDim.x + threadIdx.x;
  // dst: (b_size, l_out, c_in, l_k)
  // src: (b_size, c_in, l_in)
  if (thread_i >= numel) {
    return;
  }
  // info packs src dims followed by src strides.
  const size_t *src_dims = info;
  const size_t *src_s = info + 3;
  const size_t c_in = src_dims[1];
  const size_t l_in = src_dims[2];
  const size_t dst_s1 = c_in;
  const size_t dst_s0 = l_out * dst_s1;
  // Decompose the flat thread index into (batch, out-pos, channel).
  size_t tmp_dst_i = thread_i;
  const size_t b_idx = tmp_dst_i / dst_s0;
  tmp_dst_i -= b_idx * dst_s0;
  const size_t l_idx = tmp_dst_i / dst_s1;
  tmp_dst_i -= l_idx * dst_s1;
  const size_t c_idx = tmp_dst_i;
  for (size_t l_k_idx = 0; l_k_idx < l_k; ++l_k_idx) {
    // Position in the (virtually) padded input for this kernel tap.
    size_t src_l_idx = l_idx * stride + l_k_idx * dilation;
    size_t dst_i = thread_i * l_k + l_k_idx;
    if (src_l_idx < padding || src_l_idx >= l_in + padding) {
      // Tap in the zero padding.
      dst[dst_i] = static_cast<T>(0);
    }
    else {
      src_l_idx -= padding;
      const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_l_idx * src_s[2];
      dst[dst_i] = src[src_i];
    }
  }
}
// Inverse of im2col1d: every thread owns one element of the output and
// sums all the unfolded column entries that were copied from that
// position, i.e. all (l_in_idx, k0) with l_out_idx = l_in_idx * stride + k0.
// src: (b_size, l_in, c_out, l_k)
// dst: (b_size, c_out, l_out)
template <typename T>
__device__ void col2im1d(
    const size_t dst_el,
    const size_t l_out,
    const size_t l_in,
    const size_t c_out,
    const size_t k_size,
    const size_t stride,
    const T *src,
    T *dst
) {
    const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
    if (dst_i >= dst_el) {
        return;
    }
    // Row-major strides of the contiguous dst and src tensors.
    const size_t dst_s0 = c_out * l_out;
    const size_t dst_s1 = l_out;
    const size_t src_s0 = c_out * k_size * l_in;
    const size_t src_s1 = c_out * k_size;
    const size_t src_s2 = k_size;
    // Decompose dst_i into (b, c, l_out) coordinates.
    size_t tmp_dst_i = dst_i;
    const size_t b_idx = tmp_dst_i / dst_s0;
    tmp_dst_i -= b_idx * dst_s0;
    const size_t c_idx = tmp_dst_i / dst_s1;
    tmp_dst_i -= c_idx * dst_s1;
    // Signed on purpose: the loop below decrements l_in_idx towards 0.
    const int l_out_idx = tmp_dst_i;
    dst[dst_i] = static_cast<T>(0);
    // Start from the largest l_in_idx that can reach l_out_idx, then step
    // k0 up by `stride` while stepping l_in_idx down, preserving the
    // invariant l_out_idx = l_in_idx * stride + k0.
    int l_in_idx = l_out_idx / stride;
    int k0 = l_out_idx - l_in_idx * stride;
    for (; k0 < k_size && l_in_idx >= 0; k0 += stride, --l_in_idx) {
        if (l_in_idx < l_in) {
            const size_t src_i = b_idx * src_s0 + l_in_idx * src_s1 + c_idx * src_s2 + k0;
            dst[dst_i] += src[src_i];
        }
    }
}
// Unfold 2d convolution patches into columns (im2col).
// Each thread writes exactly one destination element.
// dst: (b_size, h_out, w_out, c_in, h_k, w_k)
// src: (b_size, c_in, h_in, w_in)
template <typename T>
__device__ void im2col(
    const size_t dst_numel,
    const size_t h_out,
    const size_t w_out,
    const size_t h_k,
    const size_t w_k,
    const size_t stride,
    const size_t padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    T *dst
) {
    const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
    if (dst_i >= dst_numel) {
        return;
    }
    // info packs the source dims (4) followed by the source strides (4).
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t c_in = src_dims[1];
    const size_t h_in = src_dims[2];
    const size_t w_in = src_dims[3];
    // Row-major strides of the contiguous destination tensor.
    const size_t dst_s4 = w_k;
    const size_t dst_s3 = h_k * dst_s4;
    const size_t dst_s2 = c_in * dst_s3;
    const size_t dst_s1 = w_out * dst_s2;
    const size_t dst_s0 = h_out * dst_s1;
    // Decompose dst_i into (b, h, w, c, h_k, w_k) coordinates.
    size_t tmp_dst_i = dst_i;
    const size_t b_idx = tmp_dst_i / dst_s0;
    tmp_dst_i -= b_idx * dst_s0;
    const size_t h_idx = tmp_dst_i / dst_s1;
    tmp_dst_i -= h_idx * dst_s1;
    const size_t w_idx = tmp_dst_i / dst_s2;
    tmp_dst_i -= w_idx * dst_s2;
    const size_t c_idx = tmp_dst_i / dst_s3;
    tmp_dst_i -= c_idx * dst_s3;
    const size_t h_k_idx = tmp_dst_i / dst_s4;
    tmp_dst_i -= h_k_idx * dst_s4;
    const size_t w_k_idx = tmp_dst_i;
    // Padded source coordinates; compare before subtracting the padding so
    // the arithmetic stays unsigned-safe.
    size_t src_h_idx = h_idx * stride + h_k_idx * dilation;
    size_t src_w_idx = w_idx * stride + w_k_idx * dilation;
    if (src_h_idx < padding || src_h_idx >= h_in + padding) {
        dst[dst_i] = static_cast<T>(0);
    }
    else if (src_w_idx < padding || src_w_idx >= w_in + padding) {
        dst[dst_i] = static_cast<T>(0);
    }
    else {
        src_h_idx -= padding;
        src_w_idx -= padding;
        const size_t src_i =
            b_idx * src_s[0]
            + c_idx * src_s[1]
            + src_h_idx * src_s[2]
            + src_w_idx * src_s[3];
        dst[dst_i] = src[src_i];
    }
}
// Naive implementation of conv2d: one thread per output element, direct
// triple loop over the kernel window and input channels.
// A is the accumulator type (e.g. float when T is half/bf16).
// src: (b_size, c_in, h_in, w_in), NCHW
// kernel: (c_out, c_in, h_k, w_k)
// dst: (b_size, c_out, h_out, w_out)
template <typename T, typename A>
__device__ void conv2d(
    const size_t src_numel,
    const size_t w_out,
    const size_t h_out,
    const size_t stride,
    const size_t padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    const T *kernel,
    T *dst
) {
    const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
    // info packs src dims (4), src strides (4), kernel dims (4),
    // kernel strides (4).
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t *k_dims = info + 8;
    const size_t *k_s = info + 12;
    const size_t h_k = k_dims[2];
    const size_t w_k = k_dims[3];
    const size_t c_out = k_dims[0];
    const size_t c_in = src_dims[1];
    const size_t h_in = src_dims[2];
    const size_t w_in = src_dims[3];
    if (dst_i >= src_dims[0] * c_out * w_out * h_out) {
        return;
    }
    // TODO
    const size_t b_idx = dst_i / (w_out * h_out * c_out);
    const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out;
    // NCHW layout.
    const size_t dst_h = (dst_i / w_out) % h_out;
    const size_t dst_w = dst_i % w_out;
    const size_t src_idx0 = b_idx * src_s[0];
    A d = 0;
    // Walk the kernel window; taps that land in the padding region are
    // skipped (comparison happens before the padding is subtracted so the
    // unsigned arithmetic cannot wrap).
    for (size_t w_offset = 0; w_offset < w_k; ++w_offset) {
        size_t src_w = stride * dst_w + w_offset * dilation;
        if (src_w < padding || src_w >= w_in + padding) {
            continue;
        }
        src_w -= padding;
        for (size_t h_offset = 0; h_offset < h_k; ++h_offset) {
            size_t src_h = stride * dst_h + h_offset * dilation;
            if (src_h < padding || src_h >= h_in + padding) {
                continue;
            }
            src_h -= padding;
            for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
                const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_h * src_s[2] + src_w * src_s[3];
                const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + h_offset * k_s[2] + w_offset * k_s[3];
                d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
            }
        }
    }
    dst[dst_i] = static_cast<T>(d);
}
// Naive implementation of conv_transpose1d: one thread per output element.
// For each kernel tap, invert out_x = inp_x * stride + k_x * dilation - padding
// and accumulate only the taps that map to an integral, in-range input index.
// src: (b_size, c_in, l_in)
// kernel: (c_in, c_out, l_k) -- note c_in leads for transposed conv.
// dst: (b_size, c_out, l_out)
template <typename T, typename A>
__device__ void conv_transpose1d(
    const size_t src_numel,
    const size_t l_out,
    const size_t stride,
    const size_t padding,
    const size_t out_padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    const T *kernel,
    T *dst
) {
    const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
    // info packs src dims (3), src strides (3), kernel dims (3),
    // kernel strides (3).
    const size_t *src_dims = info;
    const size_t *src_s = info + 3;
    const size_t *k_dims = info + 6;
    const size_t *k_s = info + 9;
    const size_t l_k = k_dims[2];
    const size_t c_out = k_dims[1];
    const size_t c_in = src_dims[1];
    const size_t l_in = src_dims[2];
    if (dst_i >= src_dims[0] * c_out * l_out) {
        return;
    }
    // TODO
    const size_t b_idx = dst_i / (l_out * c_out);
    const size_t dst_c_idx = (dst_i / l_out) % c_out;
    // NCL layout.
    const size_t out_x = dst_i % l_out;
    const size_t src_idx0 = b_idx * src_s[0];
    A d = 0;
    for (int k_x = 0; k_x < (int)l_k; ++k_x) {
        // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding;
        // NOTE(review): k_x * dilation promotes to size_t, so a
        // mathematically negative value relies on wraparound narrowing back
        // to a negative int; appears intentional but worth confirming.
        int inp_x_stride = (int)(out_x + padding) - k_x * dilation;
        if (inp_x_stride < 0 || inp_x_stride % stride) {
            continue;
        }
        int inp_x = inp_x_stride / stride;
        if (inp_x >= l_in) continue;
        for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
            const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_x * src_s[2];
            const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_x * k_s[2];
            d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
        }
    }
    dst[dst_i] = static_cast<T>(d);
}
// Naive implementation of conv_transpose2d: one thread per output element.
// Same inversion scheme as conv_transpose1d, applied independently on the
// width (k_x) and height (k_y) axes.
// src: (b_size, c_in, h_in, w_in)
// kernel: (c_in, c_out, h_k, w_k) -- note c_in leads for transposed conv.
// dst: (b_size, c_out, h_out, w_out)
template <typename T, typename A>
__device__ void conv_transpose2d(
    const size_t src_numel,
    const size_t w_out,
    const size_t h_out,
    const size_t stride,
    const size_t padding,
    const size_t out_padding,
    const size_t dilation,
    const size_t *info,
    const T *src,
    const T *kernel,
    T *dst
) {
    const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
    // info packs src dims (4), src strides (4), kernel dims (4),
    // kernel strides (4).
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t *k_dims = info + 8;
    const size_t *k_s = info + 12;
    const size_t h_k = k_dims[2];
    const size_t w_k = k_dims[3];
    const size_t c_out = k_dims[1];
    const size_t c_in = src_dims[1];
    const size_t h_in = src_dims[2];
    const size_t w_in = src_dims[3];
    if (dst_i >= src_dims[0] * c_out * w_out * h_out) {
        return;
    }
    // TODO
    const size_t b_idx = dst_i / (w_out * h_out * c_out);
    const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out;
    // NCHW layout.
    const size_t out_y = (dst_i / w_out) % h_out;
    const size_t out_x = dst_i % w_out;
    const size_t src_idx0 = b_idx * src_s[0];
    A d = 0;
    for (int k_x = 0; k_x < (int)w_k; ++k_x) {
        // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding;
        // keep only taps mapping to an integral, in-range input column.
        int inp_x_stride = (int)(out_x + padding) - k_x * dilation;
        if (inp_x_stride < 0 || inp_x_stride % stride) {
            continue;
        }
        int inp_x = inp_x_stride / stride;
        if (inp_x >= w_in) continue;
        for (int k_y = 0; k_y < (int)h_k; ++k_y) {
            // Same inversion for the row coordinate.
            int inp_y_stride = (int)(out_y + padding) - k_y * dilation;
            if (inp_y_stride < 0 || inp_y_stride % stride) {
                continue;
            }
            int inp_y = inp_y_stride / stride;
            if (inp_y >= h_in) continue;
            for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
                const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_y * src_s[2] + inp_x * src_s[3];
                const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_y * k_s[2] + k_x * k_s[3];
                d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
            }
        }
    }
    dst[dst_i] = static_cast<T>(d);
}
// Average pooling over 2d windows; one thread per output element.
// A is the accumulator type. src: (b_size, c, w_in, h_in), strides in info + 4.
// The divisor is always the full window area, even when defensive bounds
// checks skip out-of-range taps.
template <typename T, typename A>
__device__ void avg_pool2d(
    const size_t src_numel,
    const size_t w_k,
    const size_t h_k,
    const size_t w_stride,
    const size_t h_stride,
    const size_t *info,
    const T *src,
    T *dst
) {
    const size_t out_i = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t c = src_dims[1];
    const size_t w_in = src_dims[2];
    const size_t h_in = src_dims[3];
    // Output extent for a valid (non-padded) pooling window.
    const size_t w_out = (w_in - w_k) / w_stride + 1;
    const size_t h_out = (h_in - h_k) / h_stride + 1;
    if (out_i >= src_dims[0] * c * w_out * h_out) {
        return;
    }
    // Decompose the linear output index, h fastest.
    const size_t out_h = out_i % h_out;
    const size_t out_w = (out_i / h_out) % w_out;
    const size_t c_idx = (out_i / (h_out * w_out)) % c;
    const size_t b_idx = out_i / (h_out * w_out * c);
    const size_t base = b_idx * src_s[0] + c_idx * src_s[1];
    const float scale = 1.0 / (w_k * h_k);
    A acc = 0;
    for (size_t kw = 0; kw < w_k; ++kw) {
        const size_t in_w = w_stride * out_w + kw;
        if (in_w >= w_in) continue;
        for (size_t kh = 0; kh < h_k; ++kh) {
            const size_t in_h = h_stride * out_h + kh;
            if (in_h >= h_in) continue;
            acc += static_cast<A>(src[base + in_w * src_s[2] + in_h * src_s[3]]);
        }
    }
    dst[out_i] = static_cast<T>(acc * scale);
}
// Max pooling over 2d windows; one thread per output element.
// src: (b_size, c, w_in, h_in), strides in info + 4.
// If every tap is skipped by the defensive bounds checks the output
// stays at its zero initialisation, matching the original behaviour.
template <typename T>
__device__ void max_pool2d(
    const size_t src_numel,
    const size_t w_k,
    const size_t h_k,
    const size_t w_stride,
    const size_t h_stride,
    const size_t *info,
    const T *src,
    T *dst
) {
    const size_t out_i = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t c = src_dims[1];
    const size_t w_in = src_dims[2];
    const size_t h_in = src_dims[3];
    // Output extent for a valid (non-padded) pooling window.
    const size_t w_out = (w_in - w_k) / w_stride + 1;
    const size_t h_out = (h_in - h_k) / h_stride + 1;
    if (out_i >= src_dims[0] * c * w_out * h_out) {
        return;
    }
    // Decompose the linear output index, h fastest.
    const size_t out_h = out_i % h_out;
    const size_t out_w = (out_i / h_out) % w_out;
    const size_t c_idx = (out_i / (h_out * w_out)) % c;
    const size_t b_idx = out_i / (h_out * w_out * c);
    const size_t base = b_idx * src_s[0] + c_idx * src_s[1];
    T best = 0;
    bool seen = false;
    for (size_t kw = 0; kw < w_k; ++kw) {
        const size_t in_w = w_stride * out_w + kw;
        if (in_w >= w_in) continue;
        for (size_t kh = 0; kh < h_k; ++kh) {
            const size_t in_h = h_stride * out_h + kh;
            if (in_h >= h_in) continue;
            const T v = src[base + in_w * src_s[2] + in_h * src_s[3]];
            // First visible tap seeds the maximum; later taps fold in.
            best = seen ? maxg(best, v) : v;
            seen = true;
        }
    }
    dst[out_i] = best;
}
// Nearest-neighbour 2d upsampling; one thread per output element.
// src: (b_size, c, w_in, h_in), strides in info + 4.
template <typename T>
__device__ void upsample_nearest2d(
    const size_t w_out,
    const size_t h_out,
    const double w_scale,
    const double h_scale,
    const size_t *info,
    const T *src,
    T *dst
) {
    const size_t out_i = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t *src_dims = info;
    const size_t *src_s = info + 4;
    const size_t c = src_dims[1];
    const size_t w_in = src_dims[2];
    const size_t h_in = src_dims[3];
    if (out_i >= src_dims[0] * c * w_out * h_out) {
        return;
    }
    // Decompose the linear output index, h fastest.
    const size_t out_h = out_i % h_out;
    const size_t out_w = (out_i / h_out) % w_out;
    const size_t c_idx = (out_i / (h_out * w_out)) % c;
    const size_t b_idx = out_i / (h_out * w_out * c);
    // Nearest source coordinates (truncating), clamped to the input extent.
    size_t in_w = static_cast<size_t>(out_w * w_scale);
    size_t in_h = static_cast<size_t>(out_h * h_scale);
    in_w = in_w < w_in ? in_w : w_in - 1;
    in_h = in_h < h_in ? in_h : h_in - 1;
    dst[out_i] = src[b_idx * src_s[0] + c_idx * src_s[1] + in_w * src_s[2] + in_h * src_s[3]];
}
// Wrapper-generating macros: each expands to an extern "C" __global__
// kernel entry point that forwards its arguments to the matching
// templated __device__ helper above. TYPEACC selects the accumulator type
// for the ops that need one.
// NOTE(review): CONV1D_OP names its second parameter `num_dims` but
// forwards it in the position the other conv wrappers use for the output
// length -- likely a misnomer; confirm against conv1d's signature.
#define CONV1D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t num_dims, \
    const size_t stride, \
    const size_t padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    const TYPENAME *kernel, \
    TYPENAME *dst \
) {  \
  conv1d<TYPENAME, TYPEACC>(src_numel, num_dims, stride, padding, dilation, info, src, kernel, dst); \
} \

#define CONV2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t w_out, \
    const size_t h_out, \
    const size_t stride, \
    const size_t padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    const TYPENAME *kernel, \
    TYPENAME *dst \
) {  \
  conv2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, dilation, info, src, kernel, dst); \
} \

#define IM2COL1D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t dst_numel, \
    const size_t l_out, \
    const size_t l_k, \
    const size_t stride, \
    const size_t padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  im2col1d<TYPENAME>(dst_numel, l_out, l_k, stride, padding, dilation, info, src, dst); \
} \

#define COL2IM1D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t dst_el, \
    const size_t l_out, \
    const size_t l_in, \
    const size_t c_out, \
    const size_t k_size, \
    const size_t stride, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  col2im1d<TYPENAME>(dst_el, l_out, l_in, c_out, k_size, stride, src, dst); \
} \

#define IM2COL_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t dst_numel, \
    const size_t h_out, \
    const size_t w_out, \
    const size_t h_k, \
    const size_t w_k, \
    const size_t stride, \
    const size_t padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  im2col<TYPENAME>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, info, src, dst); \
} \

#define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t l_out, \
    const size_t stride, \
    const size_t padding, \
    const size_t out_padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    const TYPENAME *kernel, \
    TYPENAME *dst \
) {  \
  conv_transpose1d<TYPENAME, TYPEACC>(src_numel, l_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \
} \

#define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t w_out, \
    const size_t h_out, \
    const size_t stride, \
    const size_t padding, \
    const size_t out_padding, \
    const size_t dilation, \
    const size_t *info, \
    const TYPENAME *src, \
    const TYPENAME *kernel, \
    TYPENAME *dst \
) {  \
  conv_transpose2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \
} \

#define AVG_POOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t w_k, \
    const size_t h_k, \
    const size_t w_stride, \
    const size_t h_stride, \
    const size_t *info, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  avg_pool2d<TYPENAME, TYPEACC>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \
} \

#define MAX_POOL2D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t src_numel, \
    const size_t w_k, \
    const size_t h_k, \
    const size_t w_stride, \
    const size_t h_stride, \
    const size_t *info, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  max_pool2d<TYPENAME>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \
} \

#define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME(  \
    const size_t w_out, \
    const size_t h_out, \
    const double w_scale, \
    const double h_scale, \
    const size_t *info, \
    const TYPENAME *src, \
    TYPENAME *dst \
) {  \
  upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, info, src, dst); \
} \

// Kernel instantiations, gated by the compute capability each dtype needs.
// bf16 requires sm_80+ (Ampere).
#if __CUDA_ARCH__ >= 800
CONV1D_OP(__nv_bfloat16, float, conv1d_bf16)
CONV2D_OP(__nv_bfloat16, float, conv2d_bf16)
CONVT1D_OP(__nv_bfloat16, float, conv_transpose1d_bf16)
CONVT2D_OP(__nv_bfloat16, float, conv_transpose2d_bf16)
AVG_POOL2D_OP(__nv_bfloat16, float, avg_pool2d_bf16)
MAX_POOL2D_OP(__nv_bfloat16, max_pool2d_bf16)
UPSAMPLE_NEAREST2D_OP(__nv_bfloat16, upsample_nearest2d_bf16)
IM2COL_OP(__nv_bfloat16, im2col_bf16)
IM2COL1D_OP(__nv_bfloat16, im2col1d_bf16)
COL2IM1D_OP(__nv_bfloat16, col2im1d_bf16)

// NOTE: No conv ops for f8
// CONV1D_OP(__nv_bfloat16, float, conv1d_f8_e5m)
// CONV2D_OP(__nv_fp8_e4m3, float, conv2d_f8_e5m)
// CONVT1D_OP(__nv_fp8_e4m3, float, conv_transpose1d_f8_e5m)
// CONVT2D_OP(__nv_fp8_e4m3, float, conv_transpose2d_f8_e5m)
// AVG_POOL2D_OP(__nv_fp8_e4m3, float, avg_pool2d_f8_e5m)
// MAX_POOL2D_OP(__nv_fp8_e4m3, max_pool2d_f8_e5m)
// UPSAMPLE_NEAREST2D_OP(__nv_fp8_e4m3, upsample_nearest2d_f8_e5m)
// IM2COL_OP(__nv_fp8_e4m3, im2col_f8_e5m)
// IM2COL1D_OP(__nv_fp8_e4m3, im2col1d_f8_e5m)
// COL2IM1D_OP(__nv_fp8_e4m3, col2im1d_f8_e5m)
#endif

// f16 requires sm_53+.
#if __CUDA_ARCH__ >= 530
CONV1D_OP(__half, float, conv1d_f16)
CONV2D_OP(__half, float, conv2d_f16)
CONVT1D_OP(__half, float, conv_transpose1d_f16)
CONVT2D_OP(__half, float, conv_transpose2d_f16)
AVG_POOL2D_OP(__half, float, avg_pool2d_f16)
MAX_POOL2D_OP(__half, max_pool2d_f16)
UPSAMPLE_NEAREST2D_OP(__half, upsample_nearest2d_f16)
IM2COL_OP(__half, im2col_f16)
IM2COL1D_OP(__half, im2col1d_f16)
COL2IM1D_OP(__half, col2im1d_f16)
#endif

// f32/f64/u8/u32 kernels are available on every architecture.
CONV1D_OP(float, float, conv1d_f32)
CONV1D_OP(double, double, conv1d_f64)
CONV1D_OP(uint8_t, uint8_t, conv1d_u8)
CONV1D_OP(uint32_t, uint32_t, conv1d_u32)

CONV2D_OP(float, float, conv2d_f32)
CONV2D_OP(double, double, conv2d_f64)
CONV2D_OP(uint8_t, uint8_t, conv2d_u8)
CONV2D_OP(uint32_t, uint32_t, conv2d_u32)

CONVT1D_OP(float, float, conv_transpose1d_f32)
CONVT1D_OP(double, double, conv_transpose1d_f64)
CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8)
CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32)

CONVT2D_OP(float, float, conv_transpose2d_f32)
CONVT2D_OP(double, double, conv_transpose2d_f64)
CONVT2D_OP(uint8_t, uint8_t, conv_transpose2d_u8)
CONVT2D_OP(uint32_t, uint32_t, conv_transpose2d_u32)

AVG_POOL2D_OP(float, float, avg_pool2d_f32)
AVG_POOL2D_OP(double, double, avg_pool2d_f64)
AVG_POOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8)
AVG_POOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32)

MAX_POOL2D_OP(float, max_pool2d_f32)
MAX_POOL2D_OP(double, max_pool2d_f64)
MAX_POOL2D_OP(uint8_t, max_pool2d_u8)
MAX_POOL2D_OP(uint32_t, max_pool2d_u32)

UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32)
UPSAMPLE_NEAREST2D_OP(double, upsample_nearest2d_f64)
UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8)
UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32)

IM2COL_OP(float, im2col_f32)
IM2COL_OP(double, im2col_f64)
IM2COL_OP(uint8_t, im2col_u8)
IM2COL_OP(uint32_t, im2col_u32)

IM2COL1D_OP(float, im2col1d_f32)
IM2COL1D_OP(double, im2col1d_f64)
IM2COL1D_OP(uint8_t, im2col1d_u8)
IM2COL1D_OP(uint32_t, im2col1d_u32)

COL2IM1D_OP(float, col2im1d_f32)
COL2IM1D_OP(double, col2im1d_f64)
COL2IM1D_OP(uint8_t, col2im1d_u8)
COL2IM1D_OP(uint32_t, col2im1d_u32)
| candle/candle-kernels/src/conv.cu/0 | {
"file_path": "candle/candle-kernels/src/conv.cu",
"repo_id": "candle",
"token_count": 12097
} | 48 |
#include <metal_stdlib>
// Map a contiguous element index to the byte-free element offset into a
// strided buffer, walking dimensions from the innermost (fastest varying)
// outwards.
METAL_FUNC uint get_strided_index(
    uint idx,
    constant size_t &num_dims,
    constant size_t *dims,
    constant size_t *strides
) {
    uint offset = 0;
    for (uint i = num_dims; i > 0; i--) {
        const uint d = i - 1;
        offset += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    return offset;
}
using namespace metal;
// CAST generates a contiguous kernel and a strided kernel converting
// LEFT_TYPENAME buffers to RIGHT_TYPENAME.
#define CAST(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME) \
kernel void FN_NAME( \
    constant size_t &dim, \
    device const LEFT_TYPENAME *input,  \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(input[tid]); \
} \
kernel void FN_NAME_STRIDED( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    device const LEFT_TYPENAME *input,  \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)]); \
} \

// CAST_THROUGH converts via an intermediate IR_TYPENAME, for type pairs
// with no direct conversion (e.g. bfloat <-> half go through float).
#define CAST_THROUGH(FN_NAME, FN_NAME_STRIDED, LEFT_TYPENAME, RIGHT_TYPENAME, IR_TYPENAME) \
kernel void FN_NAME( \
    constant size_t &dim, \
    device const LEFT_TYPENAME *input,  \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[tid])); \
} \
kernel void FN_NAME_STRIDED( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    device const LEFT_TYPENAME *input,  \
    device RIGHT_TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = static_cast<RIGHT_TYPENAME>(static_cast<IR_TYPENAME>(input[get_strided_index(tid, num_dims, dims, strides)])); \
} \

// Kernel instantiations, grouped by source type. int64 targets need
// Metal 2.2+, bfloat needs the __HAVE_BFLOAT__ feature flag.
// u32
CAST(cast_u32_f32, cast_u32_f32_strided, uint32_t, float)
CAST(cast_u32_u8, cast_u32_u8_strided, uint32_t, uint8_t)
CAST(cast_u32_f16, cast_u32_f16_strided, uint32_t, half)
#if __METAL_VERSION__ >= 220
CAST(cast_u32_i64, cast_u32_i64_strided, uint32_t, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
CAST(cast_u32_bf16, cast_u32_bf16_strided, uint32_t, bfloat)
#endif

// u8
CAST(cast_u8_u32, cast_u8_u32_strided, uint8_t, uint32_t)
CAST(cast_u8_f32, cast_u8_f32_strided, uint8_t, float)
CAST(cast_u8_f16, cast_u8_f16_strided, uint8_t, half)
#if __METAL_VERSION__ >= 220
CAST(cast_u8_i64, cast_u8_i64_strided, uint8_t, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
CAST(cast_u8_bf16, cast_u8_bf16_strided, uint8_t, bfloat)
#endif

// f16
CAST(cast_f16_f32, cast_f16_f32_strided, half, float)
CAST(cast_f16_u8, cast_f16_u8_strided, half, uint8_t)
CAST(cast_f16_u32, cast_f16_u32_strided, half, uint32_t)
CAST(cast_f16_i64, cast_f16_i64_strided, half, int64_t)
#if defined(__HAVE_BFLOAT__)
CAST_THROUGH(cast_f16_bf16, cast_f16_bf16_strided, half, bfloat, float)
#endif

// i64
CAST(cast_i64_f32, cast_i64_f32_strided, int64_t, float)
CAST(cast_i64_u8, cast_i64_u8_strided, int64_t, uint8_t)
CAST(cast_i64_u32, cast_i64_u32_strided, int64_t, uint32_t)
CAST(cast_i64_f16, cast_i64_f16_strided, int64_t, half)
#if defined(__HAVE_BFLOAT__)
CAST_THROUGH(cast_i64_bf16, cast_i64_bf16_strided, int64_t, bfloat, float)
#endif

// f32
CAST(cast_f32_f16, cast_f32_f16_strided, float, half)
CAST(cast_f32_u32, cast_f32_u32_strided, float, uint32_t)
CAST(cast_f32_u8, cast_f32_u8_strided, float, uint8_t)
CAST(cast_f32_i64, cast_f32_i64_strided, float, int64_t)
#if defined(__HAVE_BFLOAT__)
CAST(cast_f32_bf16, cast_f32_bf16_strided, float, bfloat)
#endif

// bf16
#if defined(__HAVE_BFLOAT__)
CAST(cast_bf16_u32, cast_bf16_u32_strided, bfloat, uint32_t)
CAST(cast_bf16_i64, cast_bf16_i64_strided, bfloat, int64_t)
CAST(cast_bf16_f32, cast_bf16_f32_strided, bfloat, float)
CAST_THROUGH(cast_bf16_u8, cast_bf16_u8_strided, bfloat, uint8_t, float)
CAST_THROUGH(cast_bf16_f16, cast_bf16_f16_strided, bfloat, half, float)
#endif
"file_path": "candle/candle-metal-kernels/src/cast.metal",
"repo_id": "candle",
"token_count": 2045
} | 49 |
#include <metal_stdlib>
#include <metal_math>
#
using namespace metal;
// Translate a contiguous element index into the element offset of a
// strided buffer; dimensions are consumed from the innermost outwards.
METAL_FUNC uint get_strided_index(
    uint idx,
    constant size_t &num_dims,
    constant size_t *dims,
    constant size_t *strides
) {
    uint strided = 0;
    uint dim = num_dims;
    while (dim > 0) {
        dim -= 1;
        strided += (idx % dims[dim]) * strides[dim];
        idx /= dims[dim];
    }
    return strided;
}
// Small elementwise helpers used by the UNARY kernels below.
template <typename T> METAL_FUNC T sqr(T in){ return in * in; }
template <typename T> METAL_FUNC T recip(T in){ return T(1.0 / in); }
template <typename T> METAL_FUNC T neg(T in){ return -in; }
// Polynomial approximation of the error function, computed in float.
template <typename T> METAL_FUNC T erf(T in){
    float x = (float) in;
    // constants
    float a1 = 0.254829592;
    float a2 = -0.284496736;
    float a3 = 1.421413741;
    float a4 = -1.453152027;
    float a5 = 1.061405429;
    float p = 0.3275911;
    // Save the sign of x; the approximation is for x >= 0 and erf is odd.
    int sign = 1;
    if (x < 0)
        sign = -1;
    x = fabs(x);
    // A&S formula 7.1.26
    float t = 1.0/(1.0 + p*x);
    float y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x);
    return T(sign*y);
}
// Identity; used to instantiate the copy kernels.
template <typename T> METAL_FUNC T id(T in) { return in; }
// Exact GELU via erf.
template <typename T> METAL_FUNC T gelu_erf(T x) {
    return T(x * (1 + erf(x * M_SQRT1_2_F)) / 2);
}
// Tanh-approximation GELU; for large x the result is x itself, so short
// circuit to avoid overflowing the cubic term.
template <typename T> METAL_FUNC T gelu(T x) {
    if (x > 5) {
        return x;
    }
    T x_sq = x * x;
    T x_cube = x_sq * x;
    T alpha = x + static_cast<T>(0.044715) * x_cube;
    T beta = (static_cast<T>(M_2_SQRTPI_F * M_SQRT1_2_F) * alpha);
    return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + T(precise::tanh(beta)));
}
template <typename T> METAL_FUNC T relu(T in){
    if (in < 0) {
        return 0;
    }
    return in;
}
// silu(x) = x * sigmoid(x)
template <typename T> METAL_FUNC T silu(T in){
    return in / (static_cast<T>(1) + exp(-in));
}
template <typename T> METAL_FUNC T sigmoid(T in) {
    return recip(static_cast<T>(1) + exp(-in));
}
// Elements handled per thread by the *_tiled kernel variants.
#define TILE_SIZE 2

// CONST_SET generates three kernels that fill a buffer with a scalar:
// contiguous, strided, and tiled (TILE_SIZE elements per thread, no
// bounds check -- the dispatch is expected to cover the buffer exactly).
#define CONST_SET(TYPENAME, FN_NAME) \
kernel void FN_NAME( \
    constant size_t &dim, \
    constant TYPENAME &input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = input; \
} \
kernel void FN_NAME##_##strided( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    constant TYPENAME &input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[get_strided_index(tid, num_dims, dims, strides)] = input; \
} \
kernel void FN_NAME##_##tiled( \
    constant size_t &dim, \
    constant TYPENAME &input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    for (uint i = 0; i < TILE_SIZE; i++) { \
        const uint idx = tid * TILE_SIZE + i; \
        output[idx] = input; \
    } \
}

// UNARY generates contiguous/strided/tiled kernels applying FN elementwise.
// The value is routed through float so the same FN works for every dtype.
#define UNARY(FN, TYPENAME, FN_NAME, FN_NAME_STRIDED) \
kernel void FN_NAME( \
    constant size_t &dim, \
    device const TYPENAME *input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = TYPENAME(FN(float(input[tid]))); \
} \
kernel void FN_NAME##_##strided( \
    constant size_t &dim, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    device const TYPENAME *input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    if (tid >= dim) { \
        return; \
    } \
    output[tid] = TYPENAME(FN(float(input[get_strided_index(tid, num_dims, dims, strides)]))); \
} \
kernel void FN_NAME##_##tiled( \
    constant size_t &dim, \
    device const TYPENAME *input,  \
    device TYPENAME *output, \
    uint tid [[ thread_position_in_grid ]] \
) { \
    for (uint i = 0; i < TILE_SIZE; i++) { \
        const uint idx = tid * TILE_SIZE + i; \
        output[idx] = TYPENAME(FN(float(input[idx]))); \
    } \
}

// Convenience wrappers instantiating an op for the always-available float
// types, and separately for bfloat (feature-gated at the call sites).
#define UNARY_OP(NAME) \
UNARY(NAME, float, NAME##_f32, NAME##_f32_strided); \
UNARY(NAME, half, NAME##_f16, NAME##_f16_strided);

#define BFLOAT_UNARY_OP(NAME) \
UNARY(NAME, bfloat, NAME##_bf16, NAME##_bf16_strided);

// COPY2D copies a d1 x d2 block between buffers with independent row
// strides (src_s / dst_s), one thread per element.
#define COPY2D(FN_NAME, TYPENAME) \
kernel void FN_NAME( \
    constant int64_t &d1, \
    constant int64_t &d2, \
    constant int64_t &src_s, \
    constant int64_t &dst_s, \
    device const TYPENAME *input,  \
    device TYPENAME *output, \
    uint2 idx [[thread_position_in_grid]] \
) { \
    if (idx.x >= d1 || idx.y >= d2) return; \
    int64_t src_idx = idx.x * src_s + idx.y; \
    int64_t dst_idx = idx.x * dst_s + idx.y; \
    output[dst_idx] = input[src_idx]; \
}
// Kernel instantiations. int64 needs Metal 2.2+, bfloat needs the
// __HAVE_BFLOAT__ feature flag.
COPY2D(copy2d_f32, float)
COPY2D(copy2d_f16, half)
COPY2D(copy2d_u8, uint8_t)
COPY2D(copy2d_u32, uint32_t)

CONST_SET(float, const_set_f32)
CONST_SET(half, const_set_f16)
CONST_SET(uint8_t, const_set_u8)
CONST_SET(uint32_t, const_set_u32)

UNARY_OP(cos)
UNARY_OP(sin)
UNARY_OP(sqr)
UNARY_OP(sqrt)
UNARY_OP(neg)
UNARY_OP(exp)
UNARY_OP(log)
UNARY_OP(gelu)
UNARY_OP(silu)
UNARY_OP(abs)
UNARY_OP(ceil)
UNARY_OP(floor)
UNARY_OP(round)
UNARY_OP(gelu_erf)
UNARY_OP(erf)
UNARY_OP(recip)
UNARY_OP(relu)
UNARY_OP(sign)
UNARY_OP(sigmoid)

// Copies are the identity op run through the UNARY machinery.
UNARY(id, float, copy_f32, copy_f32_strided)
UNARY(id, half, copy_f16, copy_f16_strided)
UNARY(id, uint8_t, copy_u8, copy_u8_strided)
UNARY(id, uint32_t, copy_u32, copy_u32_strided)

// tanh may create NaN on large values, e.g. 45 rather than outputing 1.
// This has been an issue for the encodec example.
UNARY(precise::tanh, float, tanh_f32, tanh_f32_strided);
UNARY(precise::tanh, half, tanh_f16, tanh_f16_strided);

#if __METAL_VERSION__ >= 220
UNARY(id, int64_t, copy_i64, copy_i64_strided)
COPY2D(copy2d_i64, int64_t)
CONST_SET(int64_t, const_set_i64)
#endif

#if defined(__HAVE_BFLOAT__)
BFLOAT_UNARY_OP(cos)
BFLOAT_UNARY_OP(sin)
BFLOAT_UNARY_OP(sqr)
BFLOAT_UNARY_OP(sqrt)
BFLOAT_UNARY_OP(neg)
BFLOAT_UNARY_OP(exp)
BFLOAT_UNARY_OP(log)
BFLOAT_UNARY_OP(gelu)
BFLOAT_UNARY_OP(silu)
BFLOAT_UNARY_OP(abs)
BFLOAT_UNARY_OP(ceil)
BFLOAT_UNARY_OP(floor)
BFLOAT_UNARY_OP(round)
BFLOAT_UNARY_OP(gelu_erf)
BFLOAT_UNARY_OP(erf)
BFLOAT_UNARY_OP(recip)
BFLOAT_UNARY_OP(relu)
BFLOAT_UNARY_OP(sign)
BFLOAT_UNARY_OP(sigmoid)

UNARY(id, bfloat, copy_bf16, copy_bf16_strided)
UNARY(precise::tanh, bfloat, tanh_bf16, tanh_bf16_strided);

COPY2D(copy2d_bf16, bfloat)
CONST_SET(bfloat, const_set_bf16)
#endif
| candle/candle-metal-kernels/src/unary.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/unary.metal",
"repo_id": "candle",
"token_count": 3219
} | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.