# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Swinv2 model."""
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import Swinv2Backbone, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=2,
num_channels=3,
embed_dim=16,
depths=[1, 2, 1],
num_heads=[2, 2, 4],
window_size=2,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=True,
type_sequence_label_size=10,
encoder_stride=8,
out_features=["stage1", "stage2"],
out_indices=[1, 2],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return Swinv2Config(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
depths=self.depths,
num_heads=self.num_heads,
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
use_absolute_embeddings=self.use_absolute_embeddings,
patch_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_model(self, config, pixel_values, labels):
model = Swinv2Model(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
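# Worked example with this tester's defaults: a 32px image with patch size 2 gives
# 16 * 16 = 256 tokens after patch embedding; with len(depths) == 3 stages there are
# two patch-merging steps, each reducing the token count by 4 and doubling the width,
# so expected_seq_len = 256 // 4**2 = 16 and expected_dim = 16 * 2**2 = 64.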
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
def create_and_check_backbone(self, config, pixel_values, labels):
model = Swinv2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16])
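# With the defaults, "stage1" keeps the post-embedding resolution 32 // 2 = 16, hence the
# 16 x 16 feature map; the deepest stage is downsampled twice more, 16 // 2**2 = 4, which
# is the 4 x 4 map checked in the out_features=None case below.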
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
# verify backbone works with out_features=None
config.out_features = None
model = Swinv2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
model = Swinv2ForMaskedImageModeling(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
)
# test greyscale images
config.num_channels = 1
model = Swinv2ForMaskedImageModeling(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.type_sequence_label_size
model = Swinv2ForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
Swinv2Model,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Backbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torch_exportable = True
def setUp(self):
self.model_tester = Swinv2ModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=Swinv2Config,
embed_dim=37,
has_text_modality=False,
common_properties=["image_size", "patch_size", "num_channels"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
# TODO: check if this works again for PyTorch 2.x.y
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), nn.Module)
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
expected_num_attentions = len(self.model_tester.depths)
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
window_size_squared = config.window_size**2
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_heads[0], window_size_squared, window_size_squared],
)
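# With window_size=2 each local window holds 2**2 = 4 tokens, so every attention map
# ends in (num_heads, 4, 4); the leading dimension (batch * num_windows, presumably)
# is deliberately left unchecked here.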
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
# non-backbone models add both `hidden_states` and `reshaped_hidden_states` (+2);
# the backbone only adds `hidden_states` (+1)
added_hidden_states = 1 if model_class.__name__ == "Swinv2Backbone" else 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_heads[0], window_size_squared, window_size_squared],
)
def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# Swinv2 has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
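# e.g. with the default 32px input and patch size 2 this is (32 // 2) * (32 // 2) = 256
# tokens at the first (post-patch-embedding) hidden state; the padded variants below pass
# different image sizes through the same formula.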
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[num_patches, self.model_tester.embed_dim],
)
if model_class.__name__ != "Swinv2Backbone":
reshaped_hidden_states = outputs.reshaped_hidden_states
self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]),
[num_patches, self.model_tester.embed_dim],
)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def test_hidden_states_output_with_padding(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
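# Worked example: with a 32px input and patch_size=3, 32 % 3 = 2, so the padded size is
# 32 + 3 - 2 = 33, the next multiple of 3. Note the formula assumes the size is not
# already a multiple of the patch size (here it is not).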
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
def test_for_masked_image_modeling(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/swinv2-tiny-patch4-window8-256"
model = Swinv2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="Swinv2 does not support feedforward chunking yet")
def test_feed_forward_chunking(self):
pass
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self):
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
torch_device
)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_fp16(self):
model = Swinv2ForImageClassification.from_pretrained(
"microsoft/swinv2-tiny-patch4-window8-256", dtype=torch.float16
).to(torch_device)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt").to(model.dtype).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3938, -0.4290, 0.0020], dtype=model.dtype).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
# Swinv2 models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions.
model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(torch_device)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, size={"height": 481, "width": 481}, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 256, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
@require_torch
class Swinv2BackboneTest(unittest.TestCase, BackboneTesterMixin):
all_model_classes = (Swinv2Backbone,) if is_torch_available() else ()
config_class = Swinv2Config
def setUp(self):
self.model_tester = Swinv2ModelTester(self)
# File: transformers/tests/models/swinv2/test_modeling_swinv2.py
# Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch TVP model."""
import unittest
from transformers import ResNetConfig, TimmBackboneConfig, TvpConfig
from transformers.testing_utils import require_timm, require_torch, require_vision, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import TvpForVideoGrounding, TvpModel
if is_vision_available():
from PIL import Image
from transformers import TvpImageProcessor
# Copied from tests.models.videomae.test_modeling_videomae.VideoMAEModelTester with VideoMAE->TVP
class TVPModelTester:
def __init__(
self,
parent,
batch_size=1,
seq_length=2,
alpha=1.0,
beta=0.1,
visual_prompter_type="framepad",
visual_prompter_apply="replace",
num_frames=2,
max_img_size=448,
visual_prompt_size=96,
vocab_size=100,
hidden_size=32,
intermediate_size=32,
num_hidden_layers=2,
num_attention_heads=4,
max_position_embeddings=30,
max_grid_col_position_embeddings=30,
max_grid_row_position_embeddings=30,
hidden_dropout_prob=0.1,
hidden_act="gelu",
layer_norm_eps=1e-12,
initializer_range=0.02,
pad_token_id=0,
type_vocab_size=2,
attention_probs_dropout_prob=0.1,
):
self.parent = parent
self.batch_size = batch_size
self.input_id_length = seq_length
self.seq_length = seq_length + 10 + 784 # include text prompt length and visual input length
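# NOTE: 10 is the text-prompt length and 784 the number of visual tokens; 784 = 28 * 28
# suggests a 28 x 28 feature grid derived from the 448px input, but this breakdown is an
# inference from the numbers above, not taken from the TVP modeling code.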
self.alpha = alpha
self.beta = beta
self.visual_prompter_type = visual_prompter_type
self.visual_prompter_apply = visual_prompter_apply
self.num_frames = num_frames
self.max_img_size = max_img_size
self.visual_prompt_size = visual_prompt_size
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_grid_col_position_embeddings = max_grid_col_position_embeddings
self.max_grid_row_position_embeddings = max_grid_row_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.pad_token_id = pad_token_id
self.type_vocab_size = type_vocab_size
self.is_training = False
self.num_channels = 3
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.input_id_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.input_id_length])
pixel_values = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.max_img_size, self.max_img_size]
)
config = self.get_config()
return (config, input_ids, pixel_values, attention_mask)
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=64,
hidden_sizes=[64, 128],
depths=[2, 2],
hidden_act="relu",
out_features=["stage2"],
out_indices=[2],
)
return TvpConfig(
backbone_config=resnet_config,
backbone=None,
alpha=self.alpha,
beta=self.beta,
visual_prompter_type=self.visual_prompter_type,
visual_prompter_apply=self.visual_prompter_apply,
num_frames=self.num_frames,
max_img_size=self.max_img_size,
visual_prompt_size=self.visual_prompt_size,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_grid_col_position_embeddings=self.max_grid_col_position_embeddings,
max_grid_row_position_embeddings=self.max_grid_row_position_embeddings,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
type_vocab_size=self.type_vocab_size,
)
def create_and_check_model(self, config, input_ids, pixel_values, attention_mask):
model = TvpModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, pixel_values, attention_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, pixel_values, attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "pixel_values": pixel_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as TVP does not use, inputs_embeds.
The seq_length in TVP contain textual and visual inputs, and prompt.
"""
all_model_classes = (TvpModel, TvpForVideoGrounding) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": TvpModel, "temporal-video-grounding": TvpForVideoGrounding}
if is_torch_available()
else {}
)
# TODO: Enable this once this model gets more usage
test_torchscript = False
def setUp(self):
self.model_tester = TVPModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="TVP does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="TVPModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
# override as the `logit_scale` parameter initialization is different for TVP
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
# params are randomly initialized.
self.assertAlmostEqual(
param.data.mean().item(),
0.0,
delta=1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@require_timm
def test_backbone_selection(self):
def _validate_backbone_init():
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
# Confirm out_indices propagated to backbone
if model.__class__.__name__ == "TvpModel":
self.assertEqual(len(model.vision_model.backbone.out_indices), 2)
elif model.__class__.__name__ == "TvpForVideoGrounding":
self.assertEqual(len(model.model.vision_model.backbone.out_indices), 2)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Force load_backbone path
config.is_hybrid = False
# We load through configs, as the modeling file assumes config.backbone_config is always set
config.use_pretrained_backbone = False
config.backbone_kwargs = None
# Load a timm backbone
# We hack adding hidden_sizes to the config to test the backbone loading
backbone_config = TimmBackboneConfig("resnet18", out_indices=[-2, -1], hidden_sizes=[64, 128])
config.backbone_config = backbone_config
_validate_backbone_init()
# Load a HF backbone
backbone_config = ResNetConfig.from_pretrained("facebook/dinov2-small", out_indices=[-2, -1])
config.backbone_config = backbone_config
_validate_backbone_init()
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_torch
class TvpModelIntegrationTests(unittest.TestCase):
@cached_property
def default_image_processor(self):
return TvpImageProcessor.from_pretrained("Jiqing/tiny-random-tvp")
def test_inference_no_head(self):
model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt")
input_ids = torch.tensor([[1, 2]])
attention_mask = torch.tensor([[1, 1]])
encoding.update({"input_ids": input_ids, "attention_mask": attention_mask})
encoding.to(torch_device)
with torch.no_grad():
outputs = model(**encoding)
expected_shape = torch.Size((1, 796, 128))
assert outputs.last_hidden_state.shape == expected_shape
expected_slice = torch.tensor(
[[-0.4902, -0.4121, -1.7872], [-0.2184, 2.1211, -0.9371], [0.1180, 0.5003, -0.1727]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_with_head(self):
model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt")
input_ids = torch.tensor([[1, 2]])
attention_mask = torch.tensor([[1, 1]])
encoding.update({"input_ids": input_ids, "attention_mask": attention_mask})
encoding.to(torch_device)
with torch.no_grad():
outputs = model(**encoding)
expected_shape = torch.Size((1, 2))
assert outputs.logits.shape == expected_shape
expected_slice = torch.tensor([[0.5061, 0.4988]]).to(torch_device)
torch.testing.assert_close(outputs.logits, expected_slice, rtol=1e-4, atol=1e-4)
def test_interpolate_inference_no_head(self):
model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img() # 480x640
encoding = image_processor(
images=image, return_tensors="pt", do_resize=False, do_pad=False, do_center_crop=False
)
input_ids = torch.tensor([[1, 2]])
attention_mask = torch.tensor([[1, 1]])
encoding.update({"input_ids": input_ids, "attention_mask": attention_mask})
encoding.to(torch_device)
with torch.no_grad():
outputs = model(**encoding, interpolate_pos_encoding=True)
expected_shape = torch.Size((1, 1212, 128))
assert outputs.last_hidden_state.shape == expected_shape
def test_interpolate_inference_with_head(self):
model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img() # 480x640
encoding = image_processor(
images=image, return_tensors="pt", do_resize=False, do_pad=False, do_center_crop=False
)
input_ids = torch.tensor([[1, 2]])
attention_mask = torch.tensor([[1, 1]])
encoding.update({"input_ids": input_ids, "attention_mask": attention_mask})
encoding.to(torch_device)
with torch.no_grad():
outputs = model(**encoding, interpolate_pos_encoding=True, output_hidden_states=True)
expected_shape = torch.Size((1, 1212, 128))
assert outputs.hidden_states[-1].shape == expected_shape
# File: transformers/tests/models/tvp/test_modeling_tvp.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch VitPose model."""
import inspect
import unittest
import requests
from transformers import VitPoseBackboneConfig, VitPoseConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from transformers.utils.import_utils import get_torch_major_and_minor_version
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import VitPoseForPoseEstimation
if is_vision_available():
from PIL import Image
from transformers import VitPoseImageProcessor
class VitPoseModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=[16 * 8, 12 * 8],
patch_size=[8, 8],
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=2,
scale_factor=4,
out_indices=[-1],
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scale_factor = scale_factor
self.out_indices = out_indices
self.scope = scope
# in VitPose, the seq length equals the number of patches
num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1])
self.seq_length = num_patches
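# Worked example with the defaults above: image_size = [128, 96] and patch_size = [8, 8]
# give (128 // 8) * (96 // 8) = 16 * 12 = 192 patches.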
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return VitPoseConfig(
backbone_config=self.get_backbone_config(),
)
def get_backbone_config(self):
return VitPoseBackboneConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_hidden_layers=self.num_hidden_layers,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
num_attention_heads=self.num_attention_heads,
hidden_act=self.hidden_act,
out_indices=self.out_indices,
)
def create_and_check_for_pose_estimation(self, config, pixel_values, labels):
model = VitPoseForPoseEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_height = (self.image_size[0] // self.patch_size[0]) * self.scale_factor
expected_width = (self.image_size[1] // self.patch_size[1]) * self.scale_factor
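# With the defaults this is (128 // 8) * 4 = 64 and (96 // 8) * 4 = 48: the 16 x 12 patch
# grid upsampled by scale_factor = 4 in the pose-estimation head.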
self.parent.assertEqual(
result.heatmaps.shape, (self.batch_size, self.num_labels, expected_height, expected_width)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class VitPoseModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as VitPose does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (VitPoseForPoseEstimation,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_torch_exportable = True
test_torch_exportable_strictly = get_torch_major_and_minor_version() != "2.7"
def setUp(self):
self.model_tester = VitPoseModelTester(self)
self.config_tester = ConfigTester(self, config_class=VitPoseConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def test_batching_equivalence(self, atol=3e-4, rtol=3e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="VitPose does not support input and output embeddings")
def test_model_common_attributes(self):
pass
@unittest.skip(reason="VitPose does not support input and output embeddings")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="VitPose does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="VitPose does not support training yet")
def test_training(self):
pass
@unittest.skip(reason="VitPose does not support training yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="VitPose does not support training yet")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="VitPose does not support training yet")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_for_pose_estimation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pose_estimation(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "usyd-community/vitpose-base-simple"
model = VitPoseForPoseEstimation.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of people in a house
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000000139.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@require_torch
@require_vision
class VitPoseModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
VitPoseImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
if is_vision_available()
else None
)
@slow
def test_inference_pose_estimation(self):
image_processor = self.default_image_processor
model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device)
image = prepare_img()
boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
inputs = image_processor(images=image, boxes=boxes, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
heatmaps = outputs.heatmaps
assert heatmaps.shape == (2, 17, 64, 48)
expected_slice = torch.tensor(
[
[9.9330e-06, 9.9330e-06, 9.9330e-06],
[9.9330e-06, 9.9330e-06, 9.9330e-06],
[9.9330e-06, 9.9330e-06, 9.9330e-06],
]
).to(torch_device)
assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4)
pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)[0]
expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034])
expected_keypoints = torch.tensor(
[
[3.9813e02, 1.8184e02],
[3.9828e02, 1.7981e02],
[3.9596e02, 1.7948e02],
]
)
expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01])
self.assertEqual(len(pose_results), 2)
torch.testing.assert_close(pose_results[1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(pose_results[1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2)
torch.testing.assert_close(pose_results[1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
@slow
def test_batched_inference(self):
image_processor = self.default_image_processor
model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device)
image = prepare_img()
boxes = [
[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]],
[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]],
]
inputs = image_processor(images=[image, image], boxes=boxes, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model(**inputs)
heatmaps = outputs.heatmaps
assert heatmaps.shape == (4, 17, 64, 48)
expected_slice = torch.tensor(
[
[9.9330e-06, 9.9330e-06, 9.9330e-06],
[9.9330e-06, 9.9330e-06, 9.9330e-06],
[9.9330e-06, 9.9330e-06, 9.9330e-06],
]
).to(torch_device)
assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4)
pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)
expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034])
expected_keypoints = torch.tensor(
[
[3.9813e02, 1.8184e02],
[3.9828e02, 1.7981e02],
[3.9596e02, 1.7948e02],
]
)
expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01])
self.assertEqual(len(pose_results), 2)
self.assertEqual(len(pose_results[0]), 2)
torch.testing.assert_close(pose_results[0][1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(pose_results[0][1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2)
torch.testing.assert_close(pose_results[0][1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
# File: transformers/tests/models/vitpose/test_modeling_vitpose.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
import unittest
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.utils import FEATURE_EXTRACTOR_NAME
from ...test_processing_common import ProcessorTesterMixin
from .test_feature_extraction_wav2vec2 import floats_list
class Wav2Vec2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Wav2Vec2Processor
audio_input_name = "input_values"
text_input_name = "labels"
@classmethod
def setUpClass(cls):
vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ")
vocab_tokens = dict(zip(vocab, range(len(vocab))))
cls.add_kwargs_tokens_map = {
"pad_token": "<pad>",
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
feature_extractor_map = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
cls.tmpdirname = tempfile.mkdtemp()
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
cls.feature_extraction_file = os.path.join(cls.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(cls.feature_extraction_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(feature_extractor_map) + "\n")
tokenizer = cls.get_tokenizer()
tokenizer.save_pretrained(cls.tmpdirname)
@classmethod
def get_tokenizer(cls, **kwargs_init):
kwargs = cls.add_kwargs_tokens_map.copy()
kwargs.update(kwargs_init)
return Wav2Vec2CTCTokenizer.from_pretrained(cls.tmpdirname, **kwargs)
def get_feature_extractor(self, **kwargs):
return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
with tempfile.TemporaryDirectory() as tmpdir:
processor.save_pretrained(tmpdir)
processor = Wav2Vec2Processor.from_pretrained(tmpdir)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)
def test_save_load_pretrained_additional_features(self):
with tempfile.TemporaryDirectory() as tmpdir:
processor = Wav2Vec2Processor(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()
)
processor.save_pretrained(tmpdir)
tokenizer_add_kwargs = Wav2Vec2CTCTokenizer.from_pretrained(
tmpdir, **(self.add_kwargs_tokens_map | {"bos_token": "(BOS)", "eos_token": "(EOS)"})
)
feature_extractor_add_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
tmpdir, do_normalize=False, padding_value=1.0
)
processor = Wav2Vec2Processor.from_pretrained(
tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = Wav2Vec2Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
processor = self.get_processor()
text = "lower newer"
audio_inputs = self.prepare_audio_inputs()
inputs = processor(text=text, audio=audio_inputs, return_attention_mask=True, return_tensors="pt")
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
# File: transformers/tests/models/wav2vec2/test_processing_wav2vec2.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import WhisperTokenizer, WhisperTokenizerFast, is_speech_available
from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio
from .test_feature_extraction_whisper import floats_list
if is_speech_available():
from transformers import WhisperFeatureExtractor, WhisperProcessor
TRANSCRIBE = 50358
NOTIMESTAMPS = 50362
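# The two ids above are the <|transcribe|> and <|notimestamps|> token ids in the
# English-only `whisper-small.en` vocabulary used by this test (the multilingual
# checkpoints use different ids for these special tokens).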
@require_torch
@require_torchaudio
@require_sentencepiece
class WhisperProcessorTest(unittest.TestCase):
def setUp(self):
self.checkpoint = "openai/whisper-small.en"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return WhisperTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return WhisperFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = WhisperProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, WhisperTokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = WhisperProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, WhisperTokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_get_decoder_prompt_ids(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
forced_decoder_ids = processor.get_decoder_prompt_ids(task="transcribe", no_timestamps=True)
self.assertIsInstance(forced_decoder_ids, list)
for ids in forced_decoder_ids:
self.assertIsInstance(ids, (list, tuple))
expected_ids = [TRANSCRIBE, NOTIMESTAMPS]
self.assertListEqual([ids[-1] for ids in forced_decoder_ids], expected_ids)
def test_get_prompt_ids(self):
processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
prompt_ids = processor.get_prompt_ids("Mr. Quilter")
decoded_prompt = processor.tokenizer.decode(prompt_ids)
self.assertListEqual(prompt_ids.tolist(), [50360, 1770, 13, 2264, 346, 353])
self.assertEqual(decoded_prompt, "<|startofprev|> Mr. Quilter")
def test_empty_get_prompt_ids(self):
processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
prompt_ids = processor.get_prompt_ids("")
decoded_prompt = processor.tokenizer.decode(prompt_ids)
self.assertListEqual(prompt_ids.tolist(), [50360, 220])
self.assertEqual(decoded_prompt, "<|startofprev|> ")
def test_get_prompt_ids_with_special_tokens(self):
processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
def _test_prompt_error_raised_helper(prompt, special_token):
with pytest.raises(ValueError) as excinfo:
processor.get_prompt_ids(prompt)
expected = f"Encountered text in the prompt corresponding to disallowed special token: {special_token}."
self.assertEqual(expected, str(excinfo.value))
_test_prompt_error_raised_helper("<|startofprev|> test", "<|startofprev|>")
_test_prompt_error_raised_helper("test <|notimestamps|>", "<|notimestamps|>")
_test_prompt_error_raised_helper("test <|zh|> test <|transcribe|>", "<|zh|>")
def test_find_longest_common_subsequence_old(self):
"""Test using the old processing functions used in the ASR pipeline, but that serves as a BC reference."""
max_source_positions = 1500
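# 1500 is the Whisper encoder output length for one 30-second chunk
# (`max_source_positions` in the model config).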
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]]
self.assertEqual(
processor.decode(previous_sequence[0], output_offsets=True),
{
"text": " not worth thinking about.",
"offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}],
},
)
# Merge when the previous sequence is a suffix of the next sequence
# fmt: off
next_sequences_1 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
self.assertEqual(
processor.decode(next_sequences_1[0], output_offsets=True),
{
"text": (
" of spectators, retrievality is not worth thinking about. His instant panic was followed by a"
" small, sharp blow high on his chest.<|endoftext|>"
),
"offsets": [
{"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (5.0, 9.4),
},
],
},
)
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 27.5)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (27.5, 31.900000000000002),
},
],
},
)
# Merge when the sequence is in the middle of the 1st next sequence
# fmt: off
next_sequences_2 = [
[50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257]
]
# fmt: on
# {'text': ' of spectators, retrievality is not worth thinking about. His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
# fmt: off
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959],
)
# fmt: on
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on"
" his chest."
),
"timestamp": (22.56, 31.900000000000002),
},
],
},
)
# Merge when the previous sequence is not included in the current sequence
next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip
# {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)}
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832],
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 29.36),
},
],
},
)
# The last case is when the shared sequence does not start at the first predicted timestamp pair of the next sequence
next_sequences_3 = [
[50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934]
] # fmt: skip
merge = _find_timestamp_sequence(
[[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]],
processor.tokenizer,
processor.feature_extractor,
max_source_positions,
)
self.assertEqual(
merge,
[51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912]
) # fmt: skip
self.assertEqual(
processor.decode(merge, output_offsets=True),
{
"text": (
" not worth thinking about. His instant panic was followed by a small, sharp blow high on his"
" chest."
),
"offsets": [
{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)},
{
"text": " His instant panic was followed by a small, sharp blow high on his chest.",
"timestamp": (24.96, 30.96),
},
],
},
)
def _fast_find_longest_common_sequence(sequence_left, sequence_right):
"""Old processing function used in the ASR pipeline."""
seq_len_left = len(sequence_left)
seq_len_right = len(sequence_right)
counter = [[0] * (seq_len_right + 1) for _ in range(seq_len_left + 1)]
longest = 0
for i in range(seq_len_left):
for j in range(seq_len_right):
if sequence_left[i] == sequence_right[j]:
previous_counter = counter[i][j] + 1
counter[i + 1][j + 1] = previous_counter
if previous_counter > longest:
longest = previous_counter
counter = np.array(counter)
# we return the idx of the first element of the longest common sequence in the left sequence
index_left = np.argwhere(counter == longest)[-1][0] - longest if longest != 0 else -1
index_right = np.argwhere(counter == longest)[-1][1] - longest if longest != 0 else -1
return index_left, index_right, longest
def _find_timestamp_sequence(sequences, tokenizer, feature_extractor, max_source_positions):
"""
Old processing function used in the ASR pipeline.
Computes the final sequences by merging the end of the nth sequence with the beginning of the n+1th sequence. Since
`WhisperForConditionalGeneration` produces the timestamps pairwise, we filter the consecutive timestamps and only
iterate over them. We keep track of the `time` which indicates the actual starting time of the chunk that is
processed. We need to make sure to offset the timestamps tokens by the `time` in order for the tokenizer to
properly compute the final `offset`.
"""
# index of the first timestamp token
timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
items = []
    # approximation of the token-to-time ratio: chunk_length / max_source_positions ~= 0.02 seconds per token
time_precision = feature_extractor.chunk_length / max_source_positions
time = 0
for seq_idx, item in enumerate(sequences):
sequence, stride = item
if isinstance(sequence, list):
sequence = np.array(sequence)
chunk_len, stride_left, stride_right = stride
sequence = sequence.squeeze(0)
        # get rid of the `forced_decoder_ids` that are used to parametrize the generation
begin_idx = np.where(sequence == timestamp_begin)[0][0] if timestamp_begin in sequence else 0
sequence = sequence[begin_idx:]
timestamp_tokens = sequence >= timestamp_begin
if seq_idx != 0 and sum(timestamp_tokens) > 0:
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
last_timestamp = np.where(timestamp_tokens)[0][-1]
consecutive = np.append(consecutive, last_timestamp) if last_timestamp not in consecutive else consecutive
time -= stride_left + stride_right
offset = int((time / feature_extractor.sampling_rate) / time_precision)
overlap_time = int((stride_left / feature_extractor.sampling_rate) / time_precision)
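            # `offset` and `overlap_time` are expressed in timestamp-token units
            # (seconds / time_precision), so they can be compared directly against
            # the timestamp token ids below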
# relevant timestamps are in the overlapping part
relevant_timestamp = np.where(sequence[consecutive] >= timestamp_begin + overlap_time)[0]
if relevant_timestamp.shape[0] > 0:
relevant_timestamp = (
consecutive[relevant_timestamp[0] - 1] if relevant_timestamp[0] > 0 else consecutive[0]
)
# if a big stride is used, we need to check some of the previous items for the best overlap
best_match = 0
sliced_sequence = []
for idx, previous_sequence in enumerate(reversed(items)):
previous_tokens = previous_sequence[1:-1]
if previous_sequence[0] < (timestamp_begin + offset - overlap_time) and idx != 0:
break # the previous sequence is too far in the past
if len(previous_tokens) > 0:
# find the longest common sequence between the overlapping parts
index_left, index_right, match_length = _fast_find_longest_common_sequence(
sequence[1:relevant_timestamp], previous_tokens
)
# don't do anything if only 1 token was matched
if match_length > 1 and match_length > best_match:
best_match = match_length
best_idx = idx
end_of_curr_sequence_idx = (
np.where(sequence[index_left + 1 :] >= timestamp_begin)[0][0] + 1
)
end_of_curr_sequence_idx = end_of_curr_sequence_idx + 1 + index_left
                            # if all the previous tokens are matched, the previous item is fully replaced (suffix case, its timestamps are kept)
if index_left == 0 and match_length == len(previous_tokens):
sliced_sequence = np.insert(
sequence[index_left + 1 : end_of_curr_sequence_idx], 0, previous_sequence[0]
)
sliced_sequence[-1] = previous_sequence[-1]
# if part of the previous sequence is not taken
elif index_left >= 0:
sliced_sequence = sequence[index_left + 1 : end_of_curr_sequence_idx]
# let's insert the missing part of the previous sequence
previous_slice = (
previous_sequence[: index_right + 1] if index_right > 0 else [previous_sequence[0]]
)
sliced_sequence = np.insert(sliced_sequence, 0, previous_slice)
sliced_sequence[-1] += offset
if len(sliced_sequence) > 0:
items[len(items) - best_idx - 1] = sliced_sequence
items = items[: len(items) - best_idx]
sequence = sequence[end_of_curr_sequence_idx:]
# sequence might have changed
timestamp_tokens = sequence >= timestamp_begin
consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
if sum(timestamp_tokens) > 0:
last_timestamp = np.where(timestamp_tokens)[0][-1]
consecutive = (
np.append(consecutive, last_timestamp + 1) if last_timestamp not in consecutive else consecutive
)
if len(consecutive) > 0:
last_slice = 0
for current_slice in consecutive:
actual_offset = items[-1][-1] if seq_idx != 0 or last_slice != 0 else sequence[0]
sliced_tokens = sequence[last_slice:current_slice]
duration = sliced_tokens[-1] - sliced_tokens[0]
sliced_tokens[0] = actual_offset
sliced_tokens[-1] = actual_offset + duration
items.append(sliced_tokens)
last_slice = current_slice
time += chunk_len
result = []
for i in range(len(items)):
result += items[i].tolist()
return result
| transformers/tests/models/whisper/test_processing_whisper.py/0 | {
"file_path": "transformers/tests/models/whisper/test_processing_whisper.py",
"repo_id": "transformers",
"token_count": 10043
} | 617 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Zamba model."""
import math
import tempfile
import unittest
import pytest
from parameterized import parameterized
from transformers import AutoTokenizer, Zamba2Config, is_torch_available
from transformers.testing_utils import (
Expectations,
require_bitsandbytes,
require_flash_attn,
require_torch,
require_torch_gpu,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
Zamba2ForCausalLM,
Zamba2ForSequenceClassification,
Zamba2Model,
)
from transformers.models.zamba2.modeling_zamba2 import (
Zamba2HybridDynamicCache,
)
class Zamba2ModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=16,
mamba_d_state=2,
chunk_size=8,
mamba_dt_rank="auto",
num_hidden_layers=2,
num_attention_heads=2,
n_mamba_heads=8,
mamba_ngroups=8,
intermediate_size=4,
hidden_act="gelu",
hidden_mamba_act="silu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
layers_block_type=["mamba", "hybrid"],
num_mem_blocks=1,
use_mem_rope=True,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.mamba_dt_rank = mamba_dt_rank
self.mamba_d_state = mamba_d_state
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_mamba_heads = n_mamba_heads
self.mamba_ngroups = mamba_ngroups
self.chunk_size = chunk_size
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_mamba_act = hidden_mamba_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.layers_block_type = layers_block_type
self.num_mem_blocks = num_mem_blocks
self.use_mem_rope = use_mem_rope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return Zamba2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
mamba_dt_rank=self.mamba_dt_rank,
mamba_d_state=self.mamba_d_state,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
n_mamba_heads=self.n_mamba_heads,
intermediate_size=self.intermediate_size,
chunk_size=self.chunk_size,
hidden_act=self.hidden_act,
mamba_ngroups=self.mamba_ngroups,
hidden_mamba_act=self.hidden_mamba_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=True,
initializer_range=self.initializer_range,
use_mamba_kernels=False,
layers_block_type=self.layers_block_type,
num_mem_blocks=self.num_mem_blocks,
use_mem_rope=self.use_mem_rope,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = Zamba2Model(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = Zamba2ForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids, labels=token_labels)
result = model(input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
config.add_cross_attention = False
model = Zamba2ForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
# Attention: Zamba2 needs the cache to be initialized to return a cache!
past_key_values = Zamba2HybridDynamicCache(config, input_ids.shape[0], model.dtype, device=model.device)
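        # The hybrid cache holds attention key/value states for the attention layers
        # and conv/ssm states for the mamba layers, hence the explicit construction here.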
outputs = model(
input_ids,
attention_mask=input_mask,
past_key_values=past_key_values,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 1), vocab_size=2)
        # append to next input_ids and attention mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
cache_position=torch.arange(
input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
),
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = Zamba2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class Zamba2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
test_torchscript = False
all_model_classes = (
(
Zamba2Model,
Zamba2ForCausalLM,
Zamba2ForSequenceClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": Zamba2Model,
"text-classification": Zamba2ForSequenceClassification,
"text-generation": Zamba2ForCausalLM,
"zero-shot": Zamba2ForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def _check_past_key_values_for_generate(self, batch_size, decoder_past_key_values, cache_length, config):
self.assertIsInstance(decoder_past_key_values, Zamba2HybridDynamicCache)
# (batch, head, seq_length, head_features)
expected_shape = (
batch_size,
config.num_key_value_heads if hasattr(config, "num_key_value_heads") else config.num_attention_heads,
cache_length,
config.hidden_size // config.num_attention_heads,
)
self.assertListEqual(
[key_tensor.shape for key_tensor in decoder_past_key_values.key_cache],
[expected_shape] * len(decoder_past_key_values.key_cache),
)
self.assertListEqual(
[value_cache.shape for value_cache in decoder_past_key_values.value_cache],
[expected_shape] * len(decoder_past_key_values.value_cache),
)
def setUp(self):
self.model_tester = Zamba2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=Zamba2Config, hidden_size=37)
@unittest.skip("position_ids cannot be used to pad due to Mamba2 layers")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
def test_past_key_values_format(self):
"""
Overwriting to pass the expected cache shapes (Zamba2 has cache shape = [batch_size, 0] for mamba layers)
"""
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
batch_size, seq_length = inputs["input_ids"].shape
per_head_embed_dim = config.attention_head_dim # note: this one is not a common attribute name
self_attention_cache_shape = (batch_size, config.num_key_value_heads, seq_length, per_head_embed_dim)
# build the full cache shapes, including mamba layers
all_cache_shapes = []
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == "mamba":
all_cache_shapes.append([torch.Size([batch_size, 0]), torch.Size([batch_size, 0])])
else:
all_cache_shapes.append([self_attention_cache_shape, self_attention_cache_shape])
super().test_past_key_values_format(custom_all_cache_shapes=all_cache_shapes)
@unittest.skip(reason="Zamba2 has hybrid cache.")
def test_generate_continue_from_inputs_embeds(self):
pass
@unittest.skip(reason="A large mamba2 would be necessary (and costly) for that")
def test_multi_gpu_data_parallel_forward(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_initialization(self):
r"""
Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if "A_log" in name:
A = torch.arange(1, config.n_mamba_heads + 1, dtype=torch.float32)[None, :]
self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5))
elif "D" in name:
                        # check that it is initialized as a ones-like tensor
self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5))
elif "dt_bias" in name:
dt = torch.exp(
torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min))
+ math.log(config.time_step_min)
).clamp(min=config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
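                        # inv_dt is the inverse of softplus (softplus(inv_dt) == dt), so the
                        # dt_bias parameters must lie within [inv_dt[0], inv_dt[1]]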
if param.requires_grad:
self.assertTrue(param.data.max().item() <= inv_dt[1])
self.assertTrue(param.data.min().item() >= inv_dt[0])
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@unittest.skip(reason="Cumbersome and redundant for Zamba2")
def test_mismatched_shapes_have_properly_initialized_weights(self):
r"""
Overriding the test_mismatched_shapes_have_properly_initialized_weights test because A_log and D params of the
Mamba block are initialized differently and we tested that in test_initialization
"""
pass
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Zamba2 model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def _get_input_ids_and_config(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
return config, input_ids, input_mask
def test_left_padding_compatibility(self):
r"""
        Overriding the test_left_padding_compatibility test as the mamba layers accentuate the numerical differences
        caused by left padding (see the issue linked in the note below). Using a more permissive tolerance value.
"""
import inspect
# NOTE: left-padding results in small numerical differences. This is expected.
# See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535
# First, filter out models that don't support left padding - generative and decoder-only.
# Zamba2 is a decoder-only architecture
decoder_only_classes = self.all_generative_model_classes
# Then, test left-padding
def _prepare_model_kwargs(input_ids, attention_mask, signature):
model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask}
if "position_ids" in signature:
position_ids = torch.cumsum(attention_mask, dim=-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
model_kwargs["position_ids"] = position_ids
if "cache_position" in signature:
cache_position = torch.arange(input_ids.shape[-1], device=torch_device)
model_kwargs["cache_position"] = cache_position
return model_kwargs
for model_class in decoder_only_classes:
config, input_ids, attention_mask = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
signature = inspect.signature(model.forward).parameters.keys()
# Without padding
model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature)
next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :]
# With left-padding (length 32)
pad_size = (input_ids.shape[0], 32)
padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id
padded_input_ids = torch.cat((padding, input_ids), dim=1)
padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1)
model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature)
next_logits_with_padding = model(**model_kwargs).logits[:, -1, :]
# They should result in very similar logits
self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=3e-3))
@require_flash_attn
@require_torch_gpu
@require_bitsandbytes
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_fp32_ln(self):
r"""
Overriding the test_flash_attn_2_fp32_ln test as the Zamba2 model, like Mixtral, doesn't support
right padding + use cache with FA2
"""
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
dummy_input = inputs_dict[model.main_input_name]
dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
# NOTE: Zamba2 does not support right padding + use_cache with FA2.
dummy_attention_mask[:, -1] = 1
model = model_class.from_pretrained(
tmpdirname,
dtype=torch.float16,
attn_implementation="flash_attention_2",
load_in_4bit=True,
)
for _, param in model.named_parameters():
# upcast only layer norms
if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
param.data = param.data.to(torch.float32)
_ = model(dummy_input)
# with attention mask
_ = model(dummy_input, attention_mask=dummy_attention_mask)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
r"""
        Overriding the test_flash_attn_2_inference_equivalence_right_padding test as the Zamba2 model, like Mixtral,
        doesn't support right padding + use cache with FA2
"""
self.skipTest(reason="Zamba2 flash attention does not support right padding")
@unittest.skip(reason="Zamba2 has its own special cache type")
@parameterized.expand([(1, False), (1, True), (4, False)])
def test_new_cache_format(self, num_beams, do_sample):
pass
@require_torch_gpu
def test_flex_attention_with_grads(self):
"""
Overwriting as the base hidden size is big enough for compile.
Manipulation of dims causes issues due to other constraints not being satisfied anymore.
"""
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config._attn_implementation = "flex_attention"
model = model_class(config).to(device=torch_device)
self.assertTrue(model.config._attn_implementation == "flex_attention")
# Elaborate workaround for encoder-decoder models as some do not specify their main input
dummy_inputs = {model.main_input_name: inputs_dict[model.main_input_name].to(torch_device)}
if config.is_encoder_decoder:
dummy_inputs["decoder_input_ids"] = inputs_dict["decoder_input_ids"].to(torch_device)
dummy_inputs["decoder_attention_mask"] = inputs_dict["decoder_attention_mask"].to(torch_device)
# If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
_ = model(**dummy_inputs)
@require_torch
class Zamba2ModelIntegrationTest(unittest.TestCase):
model = None
tokenizer = None
@classmethod
@slow
def setUpClass(cls):
model_id = "Zyphra/Zamba2-1.2B"
cls.model = Zamba2ForCausalLM.from_pretrained(model_id, dtype=torch.float32, revision="PR")
cls.tokenizer = AutoTokenizer.from_pretrained(model_id, revision="PR")
@parameterized.expand([(torch_device,), ("cpu",)])
@slow
def test_simple_generate(self, torch_device):
self.model.to(torch_device)
input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[
"input_ids"
].to(torch_device)
out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10)
output_sentence = self.tokenizer.decode(out[0, :])
self.assertEqual(
output_sentence,
"<s> Hey how are you doing on this lovely evening?\n\nI'm doing well, thanks for",
)
with torch.no_grad():
logits = self.model(input_ids=input_ids).logits.to(dtype=torch.float32)
EXPECTED_LOGITS_NO_GRAD = torch.tensor(
[
-5.9587, 10.5152, 7.0382, -2.8728, -4.8143, -4.8142, -4.8142, -4.8144,
-4.8143, -4.8143, -4.8142, -4.8142, 6.0185, 18.0037, -4.8142, -4.8144,
-4.8143, -4.8142, -4.8143, -4.8143, -4.8143, -4.8143, -4.8142, -4.8143,
-4.8144, -4.8143, -4.8143, -4.8141, -4.8142, -4.8142, -4.8142, -4.8144,
-4.8143, -4.8143, -4.8143, -4.8142, -4.8144, -4.8144, -4.8142, -4.8142
]
, dtype=torch.float32) # fmt: skip
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3)
@parameterized.expand([(torch_device,), ("cpu",)])
@slow
def test_simple_batched_generate_with_padding(self, torch_device):
self.model.to(torch_device)
inputs = self.tokenizer(
["Hey how are you doing on this lovely evening?", "When did the Roman empire "],
padding=True,
return_tensors="pt",
).to(torch_device)
out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10)
output_sentences = self.tokenizer.batch_decode(out)
self.assertEqual(
output_sentences[0],
"<s> Hey how are you doing on this lovely evening?\n\nI'm doing well, thanks for",
)
self.assertEqual(
output_sentences[1],
"[PAD][PAD][PAD][PAD]<s> When did the Roman empire 1st fall?\nThe Roman Empire fell in",
)
with torch.no_grad():
logits = self.model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]).logits.to(
dtype=torch.float32
)
EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor(
[
-5.9611, 10.5208, 7.0411, -2.8743, -4.8167, -4.8167, -4.8167, -4.8168,
-4.8167, -4.8167, -4.8167, -4.8166, 6.0218, 18.0062, -4.8167, -4.8168,
-4.8167, -4.8167, -4.8167, -4.8168, -4.8168, -4.8168, -4.8167, -4.8167,
-4.8168, -4.8167, -4.8167, -4.8165, -4.8167, -4.8167, -4.8167, -4.8169,
-4.8168, -4.8168, -4.8168, -4.8166, -4.8169, -4.8168, -4.8167, -4.8167
]
, dtype=torch.float32) # fmt: skip
EXPECTED_LOGITS_NO_GRAD_1S = Expectations(
{
("xpu", 3): torch.tensor([0.2027, 6.3481, 3.8392, -5.7279, -6.5090, -6.5088, -6.5087, -6.5088,
-6.5087, -6.5088, -6.5090, -6.5089, 7.8796, 13.5483, -6.5088, -6.5080,
-6.5090, -6.5086, -6.5090, -6.5090, -6.5089, -6.5090, -6.5088, -6.5090,
-6.5089, -6.5090, -6.5090, -6.5097, -6.5086, -6.5089, -6.5092, -6.5089,
-6.5088, -6.5090, -6.5090, -6.5088, -6.5090, -6.5091, -6.5087, -6.5089],
dtype=torch.float32),
("cuda", None): torch.tensor([0.1966, 6.3449, 3.8350, -5.7291, -6.5106, -6.5104, -6.5103, -6.5104,
-6.5103, -6.5104, -6.5106, -6.5105, 7.8700, 13.5434, -6.5104, -6.5096,
-6.5106, -6.5102, -6.5106, -6.5106, -6.5105, -6.5106, -6.5104, -6.5106,
-6.5105, -6.5106, -6.5106, -6.5113, -6.5102, -6.5105, -6.5108, -6.5105,
-6.5104, -6.5106, -6.5106, -6.5104, -6.5106, -6.5107, -6.5103, -6.5105],
dtype=torch.float32),
}
) # fmt: skip
EXPECTED_LOGITS_NO_GRAD_1 = EXPECTED_LOGITS_NO_GRAD_1S.get_expectation()
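        # `get_expectation` picks the reference tensor matching the current accelerator
        # (device type and, when specified, major version) from the map above.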
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(
logits[1, -1, :40].cpu(),
EXPECTED_LOGITS_NO_GRAD_1,
rtol=1e-3,
atol=6e-3 if torch_device == "cpu" else 1e-3,
)
| transformers/tests/models/zamba2/test_modeling_zamba2.py/0 | {
"file_path": "transformers/tests/models/zamba2/test_modeling_zamba2.py",
"repo_id": "transformers",
"token_count": 14984
} | 618 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pytest
from transformers import (
MODEL_MAPPING,
TF_MODEL_MAPPING,
TOKENIZER_MAPPING,
ImageFeatureExtractionPipeline,
is_torch_available,
is_vision_available,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@is_pipeline_test
class ImageFeatureExtractionPipelineTests(unittest.TestCase):
model_mapping = MODEL_MAPPING
tf_model_mapping = TF_MODEL_MAPPING
@require_torch
def test_small_model_pt(self):
feature_extractor = pipeline(
task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
)
img = prepare_img()
outputs = feature_extractor(img)
self.assertEqual(
nested_simplify(outputs[0][0]),
[-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip
@require_torch
def test_small_model_w_pooler_pt(self):
feature_extractor = pipeline(
task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="pt"
)
img = prepare_img()
outputs = feature_extractor(img, pool=True)
self.assertEqual(
nested_simplify(outputs[0]),
[-0.056, 0.083, 0.021, 0.038, 0.242, -0.279, -0.033, -0.003, 0.200, -0.192, 0.045, -0.095, -0.077, 0.017, -0.058, -0.063, -0.029, -0.204, 0.014, 0.042, 0.305, -0.205, -0.099, 0.146, -0.287, 0.020, 0.168, -0.052, 0.046, 0.048, -0.156, 0.093]) # fmt: skip
@require_torch
def test_image_processing_small_model_pt(self):
feature_extractor = pipeline(
task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
)
# test with image processor parameters
image_processor_kwargs = {"size": {"height": 300, "width": 300}}
img = prepare_img()
with pytest.raises(ValueError):
# Image doesn't match model input size
feature_extractor(img, image_processor_kwargs=image_processor_kwargs)
image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]}
img = prepare_img()
outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs)
self.assertEqual(np.squeeze(outputs).shape, (226, 32))
# Test pooling option
outputs = feature_extractor(img, pool=True)
self.assertEqual(np.squeeze(outputs).shape, (32,))
@require_torch
def test_return_tensors_pt(self):
feature_extractor = pipeline(
task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
)
img = prepare_img()
outputs = feature_extractor(img, return_tensors=True)
self.assertTrue(torch.is_tensor(outputs))
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
if image_processor is None:
self.skipTest(reason="No image processor")
elif type(model.config) in TOKENIZER_MAPPING:
self.skipTest(
reason="This is a bimodal model, we need to find a more consistent way to switch on those models."
)
elif model.config.is_encoder_decoder:
self.skipTest(
"""encoder_decoder models are trickier for this pipeline.
Do we want encoder + decoder inputs to get some features?
                Do we want encoder-only features?
For now ignore those.
"""
)
feature_extractor_pipeline = ImageFeatureExtractionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
img = prepare_img()
return feature_extractor_pipeline, [img, img]
def run_pipeline_test(self, feature_extractor, examples):
imgs = examples
outputs = feature_extractor(imgs[0])
self.assertEqual(len(outputs), 1)
outputs = feature_extractor(imgs)
self.assertEqual(len(outputs), 2)
| transformers/tests/pipelines/test_pipelines_image_feature_extraction.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_image_feature_extraction.py",
"repo_id": "transformers",
"token_count": 2384
} | 619 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from huggingface_hub import VideoClassificationOutputElement, hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
compare_pipeline_output_to_hub_spec,
is_pipeline_test,
nested_simplify,
require_av,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_av
class VideoClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
example_video_filepath = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls.example_video_filepath is None:
cls.example_video_filepath = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
self._load_dataset()
video_classifier = VideoClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
top_k=2,
)
examples = [
self.example_video_filepath,
# TODO: re-enable this once we have a stable hub solution for CI
# "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def run_pipeline_test(self, video_classifier, examples):
for example in examples:
outputs = video_classifier(example)
self.assertEqual(
outputs,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
for element in outputs:
compare_pipeline_output_to_hub_spec(element, VideoClassificationOutputElement)
@require_torch
def test_small_model_pt(self):
small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
small_feature_extractor = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
)
video_classifier = pipeline(
"video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
)
video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
output = video_classifier(video_file_path, top_k=2)
self.assertEqual(
nested_simplify(output, decimals=4),
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
)
for element in output:
compare_pipeline_output_to_hub_spec(element, VideoClassificationOutputElement)
outputs = video_classifier(
[
video_file_path,
video_file_path,
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
],
)
for output in outputs:
for element in output:
compare_pipeline_output_to_hub_spec(element, VideoClassificationOutputElement)
| transformers/tests/pipelines/test_pipelines_video_classification.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_video_classification.py",
"repo_id": "transformers",
"token_count": 2024
} | 620 |
# Copyright 2022 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import importlib.metadata
import tempfile
import unittest
import pytest
from packaging import version
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
set_seed,
)
from transformers.models.opt.modeling_opt import OPTAttention
from transformers.testing_utils import (
apply_skip_if_not_implemented,
backend_empty_cache,
backend_torch_accelerator_module,
is_bitsandbytes_available,
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu_if_bnb_not_multi_backend_enabled,
require_torch_multi_accelerator,
slow,
torch_device,
)
def get_some_linear_layer(model):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
elif model.config.model_type == "opt":
try:
return model.decoder.layers[0].fc1
except AttributeError:
# for AutoModelforCausalLM
return model.model.decoder.layers[0].fc1
elif model.config.model_type == "llama":
return model.model.layers[0].mlp.gate_proj
else:
return model.transformer.h[0].mlp.dense_4h_to_h
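# NOTE: the helper above fetches a representative nn.Linear from several supported
# architectures so the tests below can inspect the class of its (quantized) weight.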
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
"""Wraps a linear layer with LoRA-like adapter - Used for testing purposes only"""
def __init__(self, module: nn.Module, rank: int):
super().__init__()
self.module = module
self.adapter = nn.Sequential(
nn.Linear(module.in_features, rank, bias=False),
nn.Linear(rank, module.out_features, bias=False),
)
small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight, std=small_std)
nn.init.zeros_(self.adapter[1].weight)
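            # zero-initializing the second projection makes the adapter a no-op at
            # initialization (LoRA-style), so the wrapped module's output is initially unchanged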
self.adapter.to(module.weight.device)
def forward(self, input, *args, **kwargs):
return self.module(input, *args, **kwargs) + self.adapter(input)
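    # Usage sketch (illustrative only; attribute names hypothetical): wrap a linear
    # layer in place, e.g. `module.fc1 = LoRALayer(module.fc1, rank=16)`, so that only
    # the adapter adds trainable parameters on top of the wrapped layer.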
if is_bitsandbytes_available():
import bitsandbytes as bnb
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
model_name = "bigscience/bloom-1b7"
# Constant values
EXPECTED_RELATIVE_DIFFERENCE = (
        2.109659552692574  # This was obtained on an RTX Titan so the number might slightly change
)
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
EXPECTED_OUTPUTS.add("Hello my name is John and I am 25 years old.")
EXPECTED_OUTPUTS.add("Hello my name is John and I am a student at the University of")
# Expected values on Intel XPU and NV A100
EXPECTED_OUTPUTS.add("Hello my name is Alina. I have been working as a professional")
MAX_NEW_TOKENS = 10
def setUp(self):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
@apply_skip_if_not_implemented
class Bnb4BitTest(Base4bitTest):
def setUp(self):
super().setUp()
# Models and tokenizer
self.model_fp16 = AutoModelForCausalLM.from_pretrained(self.model_name, dtype=torch.float16, device_map="auto")
self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
del self.model_fp16
del self.model_4bit
gc.collect()
backend_empty_cache(torch_device)
def test_quantization_num_parameters(self):
r"""
Test if the number of returned parameters is correct
See: https://github.com/huggingface/transformers/issues/25978
"""
num_params_4bit = self.model_4bit.num_parameters()
num_params_fp16 = self.model_fp16.num_parameters()
self.assertEqual(num_params_4bit, num_params_fp16)
def test_quantization_config_json_serialization(self):
r"""
A simple test to check if the quantization config is correctly serialized and deserialized
"""
config = self.model_4bit.config
self.assertTrue(hasattr(config, "quantization_config"))
_ = config.to_dict()
_ = config.to_diff_dict()
_ = config.to_json_string()
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from bitsandbytes.nn import Params4bit
mem_fp16 = self.model_fp16.get_memory_footprint()
mem_4bit = self.model_4bit.get_memory_footprint()
self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5)
linear = get_some_linear_layer(self.model_4bit)
self.assertTrue(linear.weight.__class__ == Params4bit)
def test_original_dtype(self):
r"""
A simple test to check if the model successfully stores the original dtype
"""
self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
self.assertTrue(self.model_4bit.config._pre_quantization_dtype == torch.float16)
def test_linear_are_4bit(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from transformers import T5PreTrainedModel
self.model_fp16.get_memory_footprint()
self.model_4bit.get_memory_footprint()
for name, module in self.model_4bit.named_modules():
if isinstance(module, torch.nn.Linear):
if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uint8)
def test_rwkv_4bit(self):
r"""
A simple test to check if 4-bit RWKV inference works as expected.
"""
model_id = "RWKV/rwkv-4-169m-pile"
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
tok = AutoTokenizer.from_pretrained(model_id)
text = "Hello my name is"
input_ids = tok.encode(text, return_tensors="pt").to(torch_device)
_ = model.generate(input_ids, max_new_tokens=30)
def test_generate_quality(self):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
        Given that we are operating on small numbers and the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device), max_new_tokens=10
)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_generate_quality_config(self):
r"""
Test that loading the model with the config is equivalent
"""
bnb_config = BitsAndBytesConfig()
bnb_config.load_in_4bit = True
model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=bnb_config, device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model_4bit_from_config.generate(
input_ids=encoded_input["input_ids"].to(model_4bit_from_config.device), max_new_tokens=10
)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_generate_quality_dequantize(self):
r"""
Test that loading the model and unquantize it produce correct results
"""
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model_4bit = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=bnb_config, device_map="auto"
)
model_4bit.dequantize()
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = model_4bit.generate(
input_ids=encoded_input["input_ids"].to(model_4bit.device), max_new_tokens=10
)
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_clear_quantization_trace(self):
r"""
Test that dequantizing the model won't leave any attribute relative to quantization in the model's configuration
"""
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model_4bit = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=bnb_config, device_map="auto"
)
model_4bit.dequantize()
self.assertFalse(hasattr(model_4bit, "hf_quantizer"))
self.assertFalse(hasattr(model_4bit.config, "quantization_config"))
self.assertFalse(hasattr(model_4bit.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(model_4bit, "quantization_method"))
self.assertFalse(model_4bit.is_quantized)
def test_to_device_dequantized(self):
r"""
Test that dequantizing the model won't prevent converting it to a different dtype
"""
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model_4bit = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=bnb_config, device_map="auto"
)
model_4bit.dequantize()
model_4bit.to(dtype=torch.float16)
def test_device_assignment(self):
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
self.skipTest(reason="This test requires bitsandbytes >= 0.43.2")
mem_before = self.model_4bit.get_memory_footprint()
# Move to CPU
self.model_4bit.to("cpu")
self.assertEqual(self.model_4bit.device.type, "cpu")
self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before)
if torch_device in ["cuda", "xpu"]:
# Move back to CUDA device
self.model_4bit.to(torch_device)
self.assertEqual(self.model_4bit.device.type, torch_device)
self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before)
def test_device_and_dtype_assignment(self):
r"""
Test whether attempting to change the device or cast the dtype of a model
after converting it to 4-bit precision will raise an appropriate error.
The test ensures that such operations are prohibited on 4-bit models
to prevent invalid conversions.
"""
# Moving with `to` or `cuda` is not supported with versions < 0.43.2.
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
with self.assertRaises(ValueError):
# Tries with `str`
self.model_4bit.to("cpu")
with self.assertRaises(ValueError):
# Tries with a `device`
self.model_4bit.to(torch.device("cuda:0"))
with self.assertRaises(ValueError):
# Tries with `cuda`
self.model_4bit.cuda()
with self.assertRaises(ValueError):
# Tries with a `dtype`
self.model_4bit.to(torch.float16)
with self.assertRaises(ValueError):
# Tries to cast the 4-bit model to float32 using `float()`
self.model_4bit.float()
with self.assertRaises(ValueError):
# Tries to cast the 4-bit model to float16 using `half()`
self.model_4bit.half()
# Test if we did not break anything
self.model_4bit.to(torch.device(torch_device))
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
self.model_fp16 = self.model_fp16.to(torch.float32)
_ = self.model_fp16.generate(
input_ids=encoded_input["input_ids"].to(self.model_fp16.device), max_new_tokens=10
)
if torch_device in ["cuda", "xpu"]:
# Check that this does not throw an error
_ = self.model_fp16.to(torch_device)
# Check this does not throw an error
_ = self.model_fp16.to("cpu")
# Check this does not throw an error
_ = self.model_fp16.half()
# Check this does not throw an error
_ = self.model_fp16.float()
def test_fp32_4bit_conversion(self):
r"""
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
"""
model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_4bit=True, device_map="auto")
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
def test_bnb_4bit_wrong_config(self):
r"""
Test whether creating a bnb config with unsupported values leads to errors.
"""
with self.assertRaises(ValueError):
_ = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="add")
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
@apply_skip_if_not_implemented
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model_name = "google-t5/t5-small"
cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.input_text = "Translate in German: Hello, my dog is cute"
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
gc.collect()
backend_empty_cache(torch_device)
def test_inference_without_keep_in_fp32(self):
r"""
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
both cases.
"""
from transformers import T5ForConditionalGeneration
modules = T5ForConditionalGeneration._keep_in_fp32_modules
T5ForConditionalGeneration._keep_in_fp32_modules = None
# test with `google-t5/t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name, load_in_4bit=True, device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
T5ForConditionalGeneration._keep_in_fp32_modules = modules
def test_inference_with_keep_in_fp32(self):
r"""
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test
both cases.
"""
from transformers import T5ForConditionalGeneration
# test with `google-t5/t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
self.dense_act_model_name, load_in_4bit=True, device_map="auto"
)
encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
_ = model.generate(**encoded_input)
@apply_skip_if_not_implemented
class Classes4BitModelTest(Base4bitTest):
def setUp(self):
super().setUp()
# model_name
self.model_name = "bigscience/bloom-560m"
self.seq_to_seq_name = "google-t5/t5-small"
# Different types of model
self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
# Sequence classification model
self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
self.model_name, load_in_4bit=True, device_map="auto"
)
# CausalLM model
self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
# Seq2seq model
self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
)
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
del self.base_model
del self.sequence_model
del self.model_4bit
del self.seq_to_seq_model
gc.collect()
backend_empty_cache(torch_device)
def test_correct_head_class(self):
r"""
A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification)
are kept in their native class.
"""
from bitsandbytes.nn import Params4bit
self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
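# In other words, bitsandbytes replaces only the inner Linear layers: output heads such
# as `lm_head` and `score` keep plain nn.Parameter weights, so the final logits/scores
# are computed in full precision.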
@apply_skip_if_not_implemented
class Pipeline4BitTest(Base4bitTest):
def setUp(self):
super().setUp()
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, and to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
if hasattr(self, "pipe"):
del self.pipe
gc.collect()
backend_empty_cache(torch_device)
def test_pipeline(self):
r"""
The aim of this test is to verify that mixed 4-bit models are compatible with `pipeline` from transformers. Since
we use pipelines for inference speed benchmarking, we want to make sure that this feature does not break anything
in `pipeline`.
"""
self.pipe = pipeline(
"text-generation",
model=self.model_name,
model_kwargs={
"device_map": "auto",
"load_in_4bit": True,
# float16 isn't supported on CPU, use bfloat16 instead
"dtype": torch.bfloat16 if torch_device == "cpu" else torch.float16,
},
max_new_tokens=self.MAX_NEW_TOKENS,
)
# Avoid sampling different outputs
set_seed(42)
# Run a real generation pass through the pipeline
pipeline_output = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_accelerator
@apply_skip_if_not_implemented
class Bnb4bitTestMultiAccelerator(Base4bitTest):
def setUp(self):
super().setUp()
def test_multi_accelerator_loading(self):
r"""
This tests that the model has been loaded and can be used correctly on a multi-accelerator setup.
Let's just try to load a model on 2 accelerators and see if it works. The model we test has ~2GB in total; 3GB should suffice
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": 0,
"transformer.h.0": 0,
"transformer.h.1": 0,
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 0,
"transformer.h.10": 1,
"transformer.h.11": 1,
"transformer.h.12": 1,
"transformer.h.13": 1,
"transformer.h.14": 1,
"transformer.h.15": 1,
"transformer.h.16": 1,
"transformer.h.17": 0,
"transformer.h.18": 0,
"transformer.h.19": 0,
"transformer.h.20": 0,
"transformer.h.21": 0,
"transformer.h.22": 0,
"transformer.h.23": 1,
"transformer.ln_f": 0,
}
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name, load_in_4bit=True, device_map=device_map
)
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Run generation on the encoded batch
output_parallel = model_parallel.generate(
input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10
)
self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
@apply_skip_if_not_implemented
class Bnb4BitTestTraining(Base4bitTest):
def setUp(self):
self.model_name = "facebook/opt-350m"
super().setUp()
def test_training(self):
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
self.skipTest(reason="This test requires bitsandbytes >= 0.37.0")
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
if torch_device in ["cuda", "xpu"]:
self.assertEqual(
set(model.hf_device_map.values()), {backend_torch_accelerator_module(torch_device).current_device()}
)
else:
self.assertTrue(all(param.device.type == "cpu" for param in model.parameters()))
for param in model.parameters():
param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.float32)
# Step 2: add adapters
for _, module in model.named_modules():
if isinstance(module, OPTAttention):
module.q_proj = LoRALayer(module.q_proj, rank=16)
module.k_proj = LoRALayer(module.k_proj, rank=16)
module.v_proj = LoRALayer(module.v_proj, rank=16)
# Step 3: dummy batch
batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device)
# Step 4: Check if the gradient is not None
with torch.autocast(torch_device):
out = model.forward(**batch)
out.logits.norm().backward()
for module in model.modules():
if isinstance(module, LoRALayer):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(module, nn.Embedding):
self.assertTrue(module.weight.grad is None)
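# `LoRALayer` is a small helper defined earlier in this file (outside this excerpt).
# A minimal sketch of the shape it needs for this test -- hypothetical, the real helper
# may differ in its initialization details:
#
# class LoRALayer(nn.Module):
#     """Wraps a frozen linear module and adds a trainable low-rank adapter."""
#
#     def __init__(self, module: nn.Module, rank: int):
#         super().__init__()
#         self.module = module  # the frozen (quantized) projection
#         self.adapter = nn.Sequential(
#             nn.Linear(module.in_features, rank, bias=False),
#             nn.Linear(rank, module.out_features, bias=False),
#         )
#         nn.init.normal_(self.adapter[0].weight, std=1 / rank)
#         nn.init.zeros_(self.adapter[1].weight)  # adapter starts as a no-op
#
#     def forward(self, input, *args, **kwargs):
#         return self.module(input, *args, **kwargs) + self.adapter(input)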
@apply_skip_if_not_implemented
class Bnb4BitGPT2Test(Bnb4BitTest):
model_name = "openai-community/gpt2-xl"
EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
@apply_skip_if_not_implemented
class Bnb4BitLlamaTest(Bnb4BitTest):
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
EXPECTED_RELATIVE_DIFFERENCE = 2.9461410686392764
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
@apply_skip_if_not_implemented
class BaseSerializationTest(unittest.TestCase):
model_name = "facebook/opt-125m"
input_text = "Mars colonists' favorite meals are"
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
def test_serialization(self, quant_type="nf4", double_quant=True, safe_serialization=True):
r"""
Test whether it is possible to serialize a model in 4-bit. Uses the most typical params as defaults.
See ExtendedSerializationTest class for more params combinations.
"""
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type=quant_type,
bnb_4bit_use_double_quant=double_quant,
bnb_4bit_compute_dtype=torch.bfloat16,
)
model_0 = AutoModelForCausalLM.from_pretrained(
self.model_name,
quantization_config=self.quantization_config,
device_map=torch_device,
)
with tempfile.TemporaryDirectory() as tmpdirname:
model_0.save_pretrained(tmpdirname, safe_serialization=safe_serialization)
config = AutoConfig.from_pretrained(tmpdirname)
self.assertTrue(hasattr(config, "quantization_config"))
model_1 = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)
# checking quantized linear module weight
linear = get_some_linear_layer(model_1)
self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit)
self.assertTrue(hasattr(linear.weight, "quant_state"))
self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState)
# checking memory footprint
self.assertAlmostEqual(model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2)
# Matching all parameters and their quant_state items:
d0 = dict(model_0.named_parameters())
d1 = dict(model_1.named_parameters())
self.assertTrue(d0.keys() == d1.keys())
for k in d0:
self.assertTrue(d0[k].shape == d1[k].shape)
self.assertTrue(d0[k].device.type == d1[k].device.type)
self.assertTrue(d0[k].device == d1[k].device)
self.assertTrue(d0[k].dtype == d1[k].dtype)
self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device)))
if isinstance(d0[k], bnb.nn.modules.Params4bit):
for v0, v1 in zip(
d0[k].quant_state.as_dict().values(),
d1[k].quant_state.as_dict().values(),
):
if isinstance(v0, torch.Tensor):
# The absmax will not be saved in the quant_state when using NF4 on CPU
if v0.numel() != 0:
self.assertTrue(torch.equal(v0, v1.to(v0.device)))
else:
self.assertTrue(v0 == v1)
# comparing forward() outputs
encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
out_0 = model_0(**encoded_input)
out_1 = model_1(**encoded_input)
torch.testing.assert_close(out_0["logits"], out_1["logits"], rtol=0.05, atol=0.05)
# comparing generate() outputs
encoded_input = tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output_sequences_0 = model_0.generate(**encoded_input, max_new_tokens=10)
output_sequences_1 = model_1.generate(**encoded_input, max_new_tokens=10)
def _decode(token):
return tokenizer.decode(token, skip_special_tokens=True)
self.assertEqual(
[_decode(x) for x in output_sequences_0],
[_decode(x) for x in output_sequences_1],
)
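# Outside of tests, round-tripping a 4-bit checkpoint follows the same pattern
# (a sketch; the path is a placeholder):
#
# model_0.save_pretrained("my-4bit-model", safe_serialization=True)
# reloaded = AutoModelForCausalLM.from_pretrained("my-4bit-model", device_map="auto")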
@apply_skip_if_not_implemented
class ExtendedSerializationTest(BaseSerializationTest):
"""
Tests more combinations of parameters.
"""
def test_nf4_single_unsafe(self):
self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=False)
def test_nf4_single_safe(self):
self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=True)
def test_nf4_double_unsafe(self):
self.test_serialization(quant_type="nf4", double_quant=True, safe_serialization=False)
# nf4 double safetensors quantization is tested in the test_serialization() method of the parent class
def test_fp4_single_unsafe(self):
self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=False)
def test_fp4_single_safe(self):
self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=True)
def test_fp4_double_unsafe(self):
self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=False)
def test_fp4_double_safe(self):
self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=True)
class BloomSerializationTest(BaseSerializationTest):
"""
default BaseSerializationTest config tested with Bloom family model
"""
model_name = "bigscience/bloom-560m"
class GPTSerializationTest(BaseSerializationTest):
"""
default BaseSerializationTest config tested with GPT family model
"""
model_name = "openai-community/gpt2-xl"
class LlamaSerializationTest(BaseSerializationTest):
"""
default BaseSerializationTest config tested with Llama family model
"""
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
@require_bitsandbytes
@require_accelerate
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
@apply_skip_if_not_implemented
class Bnb4BitTestBasicConfigTest(unittest.TestCase):
def test_load_in_4_and_8_bit_fails(self):
with self.assertRaisesRegex(ValueError, "load_in_4bit and load_in_8bit are both True"):
AutoModelForCausalLM.from_pretrained("facebook/opt-125m", load_in_4bit=True, load_in_8bit=True)
def test_set_load_in_8_bit(self):
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
with self.assertRaisesRegex(ValueError, "load_in_4bit and load_in_8bit are both True"):
quantization_config.load_in_8bit = True
@require_bitsandbytes
@require_accelerate
@require_torch_gpu_if_bnb_not_multi_backend_enabled
@slow
@apply_skip_if_not_implemented
class Bnb4bitCompile(unittest.TestCase):
model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"
input_text = "Hello my name is"
def setUp(self):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
@pytest.mark.torch_compile_test
def test_generate_compile(self):
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# if nothing is set, compile will be disabled for bnb
self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device),
max_new_tokens=10,
cache_implementation="static",
)
with self.assertRaises(Exception):
# overwrite property
object.__setattr__(self.model_4bit.hf_quantizer, "is_compileable", True)
self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device),
max_new_tokens=10,
cache_implementation="static",
)
| transformers/tests/quantization/bnb/test_4bit.py/0 | {
"file_path": "transformers/tests/quantization/bnb/test_4bit.py",
"repo_id": "transformers",
"token_count": 14869
} | 621 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import pytest
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, GPTQConfig
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_gptq,
require_optimum,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.utils import is_auto_gptq_available, is_gptqmodel_available, is_ipex_available
if is_torch_available():
import torch
class GPTQConfigTest(unittest.TestCase):
def test_bits(self):
with self.assertRaises(ValueError):
GPTQConfig(bits="")
GPTQConfig(bits=1)
GPTQConfig(bits=2)
GPTQConfig(bits=4)
def test_dataset(self):
with self.assertRaises(ValueError):
GPTQConfig(bits=2, dataset="auto_gpt")
GPTQConfig(bits=2, dataset="c4")
def test_damp_percent(self):
with self.assertRaises(ValueError):
GPTQConfig(bits=2, damp_percent=10)
GPTQConfig(bits=2, damp_percent=-1)
GPTQConfig(bits=2, damp_percent="0")
GPTQConfig(bits=2, damp_percent=0.01)
def test_to_dict(self):
quantization_config = GPTQConfig(bits=2)
quantization_config.to_dict()
def test_from_dict(self):
dict = {"bits": 2}
quantization_config = GPTQConfig.from_dict(dict)
self.assertEqual(dict["bits"], quantization_config.bits)
@require_optimum
def test_optimum_config(self):
from optimum.gptq import GPTQQuantizer
config = GPTQConfig(bits=2)
optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum())
self.assertEqual(optimum_config.bits, config.bits)
new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict())
self.assertEqual(optimum_config.bits, new_config.bits)
@slow
@require_optimum
@require_gptq
class GPTQTest(unittest.TestCase):
model_name = "bigscience/bloom-560m"
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
# flaky test: gptqmodel and auto-gptq are not output equivalent, nor is string comparison deterministic, even across transformers/torch versions
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I")
EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of")
EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.")
EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the")
EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,")
EXPECTED_OUTPUTS.add("Hello my name is Aiden, I am a student at the University")
EXPECTED_OUTPUTS.add("Hello my name is Nate and I am a member of the N")
EXPECTED_OUTPUTS.add("Hello my name is Nellie and I am a student at the")
EXPECTED_OUTPUTS.add("Hello my name is Nate and I am a new member of the")
# this seems a little small considering that we are doing 4-bit quant, but we have a small model and we don't quantize the embeddings
EXPECTED_RELATIVE_DIFFERENCE = 1.664253062
bits = 4
sym = True
group_size = 128
desc_act = False
use_exllama = False
dataset = [
"auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
]
device_map = "cpu" if is_gptqmodel_available() else None
# called only once for all tests in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.model_fp16 = AutoModelForCausalLM.from_pretrained(
cls.model_name, dtype=torch.float16, device_map=cls.device_map
)
cls.mem_fp16 = cls.model_fp16.get_memory_footprint()
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True)
cls.config = AutoConfig.from_pretrained(cls.model_name)
cls.quantization_config = GPTQConfig(
bits=cls.bits,
dataset=cls.dataset,
tokenizer=cls.tokenizer,
group_size=cls.group_size,
desc_act=cls.desc_act,
sym=cls.sym,
use_exllama=cls.use_exllama,
)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
dtype=torch.float16,
device_map=cls.device_map,
quantization_config=cls.quantization_config,
)
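# The same flow applies outside of tests (a sketch; the model id and params are
# illustrative): passing a GPTQConfig with a calibration dataset triggers
# quantization at load time,
#
# config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer, group_size=128)
# quantized = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=config)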
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking the
memory footprint of the converted model
"""
mem_quantized = self.quantized_model.get_memory_footprint()
self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE, places=4)
def test_device_and_dtype_assignment(self):
r"""
Test whether trying to cast (or assign a device to) a model after quantization will throw an error.
Also checks that other models are cast correctly.
"""
# This should work
if self.device_map in (None, "cpu"):
_ = self.quantized_model.to(0)
with self.assertRaises(ValueError):
# Tries with a `dtype`
self.quantized_model.to(torch.float16)
def test_original_dtype(self):
r"""
A simple test to check if the model successfully stores the original dtype
"""
self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16)
def test_quantized_layers_class(self):
"""
Simple test to check if the model conversion has been done correctly by checking
the class type of the linear layers of the converted models
"""
if is_gptqmodel_available():
from gptqmodel.utils.importer import hf_select_quant_linear
if hasattr(self.config, "quantization_config"):
checkpoint_format = self.config.quantization_config.get("checkpoint_format")
meta = self.config.quantization_config.get("meta")
else:
checkpoint_format = "gptq"
meta = None
QuantLinear = hf_select_quant_linear(
bits=self.bits,
group_size=self.group_size,
desc_act=self.desc_act,
sym=self.sym,
device_map=self.device_map,
checkpoint_format=checkpoint_format,
meta=meta,
backend=self.quantization_config.backend,
)
elif is_auto_gptq_available():
from auto_gptq.utils.import_utils import dynamically_import_QuantLinear as hf_select_quant_linear
QuantLinear = hf_select_quant_linear(
use_triton=False,
desc_act=self.desc_act,
group_size=self.group_size,
bits=self.bits,
disable_exllama=not self.use_exllama,
disable_exllamav2=True,
)
self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear)
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(model.device), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def check_quantized_layers_type(self, model, value):
self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value)
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
if self.device_map is None:
self.check_inference_correctness(self.quantized_model.to(0))
else:
if self.device_map == "cpu" and self.quantized_model.device.type != "cpu":
self.quantized_model.to("cpu")
self.check_inference_correctness(self.quantized_model)
def test_serialization(self):
"""
Test that the serialization of the model and the loading of the quantized weights work
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
if is_auto_gptq_available() and not is_gptqmodel_available():
quant_type = "cuda-old" if not self.use_exllama else "exllama"
if not self.use_exllama:
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4)
)
if self.device_map != "cpu":
quantized_model_from_saved = quantized_model_from_saved.to(0)
else:
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map
)
else:
if self.device_map == "cpu":
quant_type = "ipex" if is_ipex_available() else "torch"
else:
# We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
# TODO: Remove this once GPTQModel exllama kernels supports packing
quant_type = "tritonv2"
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, device_map=self.device_map
)
self.check_quantized_layers_type(quantized_model_from_saved, quant_type)
self.check_inference_correctness(quantized_model_from_saved)
@require_accelerate
def test_serialization_big_model_inference(self):
"""
Test the serialization of the model and the loading of the quantized weights with big model inference
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
device_map = self.device_map or "auto"
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=device_map)
self.check_inference_correctness(quantized_model_from_saved)
@require_torch_gpu
class GPTQTestCUDA(GPTQTest):
device_map = {"": 0}
def test_change_loading_attributes(self):
"""
Test that the serialization of the model and the loading of the quantized weights work with another config file
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
if is_auto_gptq_available() and not is_gptqmodel_available() and not self.use_exllama:
self.check_quantized_layers_type(self.quantized_model, "cuda-old")
# we need to put it directly on the gpu; otherwise, we won't be able to initialize the exllama kernel
quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map=self.device_map
)
self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits)
self.check_quantized_layers_type(quantized_model_from_saved, "exllama")
self.check_inference_correctness(quantized_model_from_saved)
@require_accelerate
@require_torch_multi_gpu
class GPTQTestDeviceMap(GPTQTestCUDA):
device_map = "auto"
@require_accelerate
@require_torch_multi_gpu
class GPTQTestDeviceMapExllama(GPTQTestCUDA):
device_map = "auto"
use_exllama = True
@slow
@require_optimum
@require_gptq
@require_torch_gpu
@require_accelerate
class GPTQTestActOrderExllama(unittest.TestCase):
"""
Test GPTQ model with exllama kernel and desc_act=True (also known as act-order).
More information on those arguments here:
https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig
"""
EXPECTED_OUTPUTS = set()
# flaky test: gptqmodel and auto-gptq are not output equivalent, nor is string comparison deterministic, even across transformers/torch versions
EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.")
# 4bit + act_order + 128g
model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
input_text = "Hello, how are you ?"
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
dtype=torch.float16,
device_map={"": 0},
quantization_config=cls.quantization_config,
)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True)
def check_inference_correctness(self, model):
"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_quantized_layers_type(self):
self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama")
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model)
def test_max_input_length(self):
"""
Test if max_input_length works. It modifies the maximum input length of the model that runs with the exllama backend.
"""
prompt = "I am in Paris and" * 1000
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] > 4028)
with self.assertRaises(RuntimeError) as cm:
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
self.assertTrue("temp_state buffer is too small" in str(cm.exception))
prompt = "I am in Paris and"
inp = self.tokenizer(prompt, return_tensors="pt").to(0)
self.assertTrue(inp["input_ids"].shape[1] < 4028)
self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3)
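# i.e. `max_input_length` sizes exllama's pre-allocated temp_state buffer: prompts
# longer than it overflow the buffer (the RuntimeError above), while shorter prompts
# generate normally.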
@slow
@require_optimum
@require_gptq
@require_torch_gpu
@require_accelerate
class GPTQTestExllamaV2(unittest.TestCase):
"""
Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order).
More information on those arguments here:
https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig
"""
EXPECTED_OUTPUTS = set()
# flaky test: gptqmodel and auto-gptq are not output equivalent, nor is string comparison deterministic, even across transformers/torch versions
EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.")
# 4bit + act_order + 128g
model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ"
input_text = "Hello, how are you ?"
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2})
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
dtype=torch.float16,
device_map={"": 0},
quantization_config=cls.quantization_config,
)
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True)
def test_quantized_layers_type(self):
if is_auto_gptq_available() and not is_gptqmodel_available():
self.assertEqual(
self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE,
"exllamav2",
)
else:
# We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
# TODO: Remove this once GPTQModel exllama kernels supports packing
self.assertEqual(
self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE,
"tritonv2",
)
def check_inference_correctness(self, model):
"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model)
# fails when all tests are run together
@pytest.mark.skip
@require_accelerate
@require_torch_multi_gpu
class GPTQTestDeviceMapCPUOffload(GPTQTest):
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": 0,
"transformer.h.0": 0,
"transformer.h.1": 0,
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 0,
"transformer.h.10": 1,
"transformer.h.11": 1,
"transformer.h.12": 1,
"transformer.h.13": 1,
"transformer.h.14": 1,
"transformer.h.15": 1,
"transformer.h.16": 1,
"transformer.h.17": 0,
"transformer.h.18": "cpu",
"transformer.h.19": "cpu",
"transformer.h.20": "cpu",
"transformer.h.21": "cpu",
"transformer.h.22": "cpu",
"transformer.h.23": 1,
"transformer.ln_f": 0,
}
| transformers/tests/quantization/gptq/test_gptq.py/0 | {
"file_path": "transformers/tests/quantization/gptq/test_gptq.py",
"repo_id": "transformers",
"token_count": 8880
} | 622 |
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(ROOT_DIR, "utils"))
import create_dependency_mapping # noqa: E402
# This is equivalent to `all` in the current library state (as of 09/01/2025)
MODEL_ROOT = os.path.join("src", "transformers", "models")
FILES_TO_PARSE = [
os.path.join(MODEL_ROOT, "starcoder2", "modular_starcoder2.py"),
os.path.join(MODEL_ROOT, "gemma", "modular_gemma.py"),
os.path.join(MODEL_ROOT, "olmo2", "modular_olmo2.py"),
os.path.join(MODEL_ROOT, "diffllama", "modular_diffllama.py"),
os.path.join(MODEL_ROOT, "granite", "modular_granite.py"),
os.path.join(MODEL_ROOT, "gemma2", "modular_gemma2.py"),
os.path.join(MODEL_ROOT, "mixtral", "modular_mixtral.py"),
os.path.join(MODEL_ROOT, "olmo", "modular_olmo.py"),
os.path.join(MODEL_ROOT, "rt_detr", "modular_rt_detr.py"),
os.path.join(MODEL_ROOT, "qwen2", "modular_qwen2.py"),
os.path.join(MODEL_ROOT, "qwen3", "modular_qwen3.py"),
os.path.join(MODEL_ROOT, "llava_next_video", "modular_llava_next_video.py"),
os.path.join(MODEL_ROOT, "cohere2", "modular_cohere2.py"),
os.path.join(MODEL_ROOT, "modernbert", "modular_modernbert.py"),
os.path.join(MODEL_ROOT, "colpali", "modular_colpali.py"),
os.path.join(MODEL_ROOT, "deformable_detr", "modular_deformable_detr.py"),
os.path.join(MODEL_ROOT, "aria", "modular_aria.py"),
os.path.join(MODEL_ROOT, "ijepa", "modular_ijepa.py"),
os.path.join(MODEL_ROOT, "bamba", "modular_bamba.py"),
os.path.join(MODEL_ROOT, "dinov2_with_registers", "modular_dinov2_with_registers.py"),
os.path.join(MODEL_ROOT, "instructblipvideo", "modular_instructblipvideo.py"),
os.path.join(MODEL_ROOT, "glm", "modular_glm.py"),
os.path.join(MODEL_ROOT, "phi", "modular_phi.py"),
os.path.join(MODEL_ROOT, "mistral", "modular_mistral.py"),
os.path.join(MODEL_ROOT, "phi3", "modular_phi3.py"),
os.path.join(MODEL_ROOT, "cohere", "modular_cohere.py"),
os.path.join(MODEL_ROOT, "glm4", "modular_glm4.py"),
os.path.join(MODEL_ROOT, "seed_oss", "modular_seed_oss.py"),
]
def appear_after(model1: str, model2: str, priority_list: list[list[str]]) -> bool:
"""Return True if `model1` appear after `model2` in `priority_list`."""
model1_index, model2_index = None, None
for i, level in enumerate(priority_list):
if model1 in level:
model1_index = i
if model2 in level:
model2_index = i
if model1_index is None or model2_index is None:
raise ValueError(f"Model {model1} or {model2} not found in {priority_list}")
return model1_index > model2_index
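# Example: with priority_list = [["mistral"], ["mixtral", "starcoder2"]],
# appear_after("mixtral", "mistral", priority_list) is True (level 1 > level 0),
# while appear_after("mixtral", "starcoder2", priority_list) is False because both
# models sit in the same level.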
class ConversionOrderTest(unittest.TestCase):
def test_conversion_order(self):
# Find the order
priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)
# Extract just the model names (list of lists)
model_priority_list = [[file.split("/")[-2] for file in level] for level in priority_list]
# These are based on what the current library order should be (as of 09/01/2025)
self.assertTrue(appear_after("mixtral", "mistral", model_priority_list))
self.assertTrue(appear_after("gemma2", "gemma", model_priority_list))
self.assertTrue(appear_after("starcoder2", "mistral", model_priority_list))
self.assertTrue(appear_after("olmo2", "olmo", model_priority_list))
self.assertTrue(appear_after("diffllama", "mistral", model_priority_list))
self.assertTrue(appear_after("cohere2", "gemma2", model_priority_list))
self.assertTrue(appear_after("cohere2", "cohere", model_priority_list))
self.assertTrue(appear_after("phi3", "mistral", model_priority_list))
self.assertTrue(appear_after("glm4", "glm", model_priority_list))
| transformers/tests/repo_utils/modular/test_conversion_order.py/0 | {
"file_path": "transformers/tests/repo_utils/modular/test_conversion_order.py",
"repo_id": "transformers",
"token_count": 1690
} | 623 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the test: CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/tensor_parallel/test_tensor_parallel.py
import os
import tempfile
import textwrap
from transformers import is_torch_available
from transformers.integrations.tensor_parallel import get_packed_weights, repack_weights
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
require_huggingface_hub_greater_or_equal,
require_torch_multi_accelerator,
torch_device,
torchrun,
)
if is_torch_available():
import torch
class TestTensorParallelUtils(TestCasePlus):
def test_packed_unpacked_conversion(self):
WORLD_SIZE = 2
PACKED_BLOCK_SIZE = 800
SHARDING_DIM = 2
NUM_BLOCKS = 2
original_packed_weights = torch.randn(4, 512, 2 * PACKED_BLOCK_SIZE)
original_packed_weights.get_dtype = lambda: "F32"  # get_packed_weights expects a (safetensors) PySlice object exposing get_dtype()
empty_param = torch.empty(4, 512, 2 * PACKED_BLOCK_SIZE)
class MockDeviceMesh:
def size(self):
return WORLD_SIZE
mock_mesh = (
MockDeviceMesh()
)  # get_packed_weights only calls `.size()`; do this to avoid an actual distributed run
packed_weights_0 = get_packed_weights(original_packed_weights, empty_param, mock_mesh, 0, SHARDING_DIM)
packed_weights_1 = get_packed_weights(original_packed_weights, empty_param, mock_mesh, 1, SHARDING_DIM)
# simulate all gather of sharded weights
packed_weights = torch.cat([packed_weights_0, packed_weights_1], dim=SHARDING_DIM)
unpacked_weights = repack_weights(packed_weights, SHARDING_DIM, WORLD_SIZE, NUM_BLOCKS)
assert torch.allclose(unpacked_weights, original_packed_weights)
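# Background (informal): "packed" weights store several logical blocks -- e.g. the
# gate and up projections of a gated MLP -- concatenated along one dimension. Slicing
# such a tensor naively would hand each rank a piece of a single block, so
# get_packed_weights interleaves the blocks per shard and repack_weights restores the
# original block layout after the all-gather, which is what this test round-trips.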
class TestTensorParallel(TestCasePlus):
nproc_per_node = 2
def test_model_forward(self):
script_to_run = textwrap.dedent(
"""
import torch
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "JackFram/llama-68m"
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", tp_plan="auto")
torch.distributed.barrier()
has_dtensor = 0
for name, parameter in model.named_parameters():
if isinstance(parameter.data, torch.distributed.tensor.DTensor):
has_dtensor = 1
break
assert has_dtensor == 1, "TP model must have DTensor"
tokenizer = AutoTokenizer.from_pretrained(model_id, legacy=False)
prompt = "Can I help"
inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
outputs = model(inputs)
next_token_logits = outputs[0][:, -1, :]
next_token = torch.argmax(next_token_logits, dim=-1)
response = tokenizer.decode(next_token)
assert response == "with"
torch.distributed.barrier()
torch.distributed.destroy_process_group()
"""
)
torchrun(script_to_run, self.nproc_per_node, env=self.get_env())
def test_model_backward_pass(self):
script_to_run = textwrap.dedent(
"""
import torch
import os
from transformers import AutoModelForCausalLM
from torch import nn
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float32, tp_plan="auto")
torch.distributed.barrier()
# Dummy forward and backward pass
# Note that loss.backward() will fail if there is a bug in the TP implementation
inputs = torch.randint(0, model.config.vocab_size, (2, 10), device=model.device)
labels = torch.randint(0, model.config.vocab_size, (2, 10), device=model.device)
loss = model(inputs, labels=labels).loss
loss.backward()
torch.distributed.barrier()
torch.distributed.destroy_process_group()
"""
)
torchrun(script_to_run, self.nproc_per_node, env=self.get_env())
def test_model_generate(self):
script_to_run = textwrap.dedent(
"""
import torch
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "JackFram/llama-68m"
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", tp_plan="auto")
torch.distributed.barrier()
model.forward = torch.compile(model.forward)
has_dtensor = 0
for name, parameter in model.named_parameters():
if isinstance(parameter.data, torch.distributed.tensor.DTensor):
has_dtensor = 1
break
assert has_dtensor == 1, "TP model must have DTensor"
tokenizer = AutoTokenizer.from_pretrained(model_id)
prompt = "Can I help"
inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
outputs = model.generate(inputs, max_new_tokens=10, cache_implementation="static")
output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
assert output_text[0].startswith(prompt), f"Expected output to start with '{prompt}', got '{output_text[0]}'"
torch.distributed.barrier()
torch.distributed.destroy_process_group()
"""
)
torchrun(script_to_run, self.nproc_per_node, env=self.get_env())
@require_huggingface_hub_greater_or_equal("0.31.4")
def test_model_save(self):
from safetensors import safe_open
with tempfile.TemporaryDirectory() as tmp_dir:
for is_torchrun in [True, False]:
script_to_run = textwrap.dedent(
f"""
import torch
import os
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
kwargs = dict()
if os.environ.get("RANK", None) is not None:
kwargs["tp_plan"] = "auto"
result_dir = "{tmp_dir}/tp"
else:
result_dir = "{tmp_dir}/nontp"
model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
model.save_pretrained(result_dir)
"""
)
torchrun(script_to_run, self.nproc_per_node, is_torchrun=is_torchrun, env=self.get_env())
non_tp_model_path = os.path.join(tmp_dir, "nontp")
tp_model_path = os.path.join(tmp_dir, "tp")
for filename in os.listdir(non_tp_model_path):
if not filename.endswith(".safetensors"):
continue
non_tp_model = safe_open(os.path.join(non_tp_model_path, filename), device="cpu", framework="pt")
tp_model = safe_open(os.path.join(tp_model_path, filename), device="cpu", framework="pt")
for non_tp_key in non_tp_model.keys():
non_tp_tensor = non_tp_model.get_tensor(non_tp_key)
tp_tensor = tp_model.get_tensor(non_tp_key)
assert torch.allclose(non_tp_tensor, tp_tensor), f"Tensor with key: {non_tp_key} does not match"
del non_tp_tensor, tp_tensor
class TestTensorParallelProperties(TestCasePlus):
def test_tp_plan_property_setter_getter(self):
"""Test that tp_plan property can be set and retrieved correctly."""
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test setting empty plan
model.tp_plan = {}
self.assertEqual(model.tp_plan, {})
# Test setting a valid plan
valid_plan = {"model.layers.*.self_attn.q_proj": "colwise"}
model.tp_plan = valid_plan
self.assertEqual(model.tp_plan, valid_plan)
# Test updating the plan
model.tp_plan.update({"model.layers.*.self_attn.k_proj": "colwise"})
expected_plan = {"model.layers.*.self_attn.q_proj": "colwise", "model.layers.*.self_attn.k_proj": "colwise"}
self.assertEqual(model.tp_plan, expected_plan)
# Test overriding existing entry
model.tp_plan.update({"model.layers.*.self_attn.q_proj": "colwise_rep"})
expected_plan = {
"model.layers.*.self_attn.q_proj": "colwise_rep",
"model.layers.*.self_attn.k_proj": "colwise",
}
self.assertEqual(model.tp_plan, expected_plan)
def test_tp_plan_validation_invalid_style(self):
"""Test that invalid parallel styles are rejected."""
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test invalid parallel style
with self.assertRaises(ValueError) as context:
model.tp_plan = {"layers.*.self_attn.q_proj": "invalid_style"}
self.assertIn("Unsupported tensor parallel style 'invalid_style'", str(context.exception))
self.assertIn("Supported styles are", str(context.exception))
def test_tp_plan_validation_nonexistent_layer_warning(self):
"""Test that warnings are issued for non-existent layer patterns."""
import warnings
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test warning for non-existent layer pattern
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
model.tp_plan = {"nonexistent.*.layer": "colwise"}
# Check that a warning was issued
self.assertTrue(len(w) > 0)
warning_message = str(w[0].message)
self.assertIn("Layer pattern 'nonexistent.*.layer' does not match any parameters", warning_message)
def test_tp_plan_valid_layer_patterns(self):
"""Test that valid layer patterns are accepted without warnings."""
import warnings
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test valid layer patterns that should match the model structure
valid_plans = [
{"model.layers.*.self_attn.q_proj": "colwise"},
{"model.layers.*.self_attn.k_proj": "rowwise"},
{"model.layers.*.mlp.gate_proj": "colwise_rep"},
]
for plan in valid_plans:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
model.tp_plan = plan
# Filter out any warnings that are not about layer patterns
layer_warnings = [
warning
for warning in w
if "Layer pattern" in str(warning.message)
and "does not match any parameters" in str(warning.message)
]
# Should not have layer pattern warnings for valid patterns
self.assertEqual(
len(layer_warnings),
0,
f"Unexpected warning for valid pattern {plan}: {[str(w.message) for w in layer_warnings]}",
)
# Verify the final plan was set correctly
self.assertEqual(model.tp_plan, valid_plans[-1])
def test_tp_plan_none_handling(self):
"""Test that None values are handled correctly."""
from transformers import AutoModelForCausalLM
model_id = "JackFram/llama-68m"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto")
# Test setting None
model.tp_plan = None
self.assertEqual(model.tp_plan, {})
# Test setting a plan after None
model.tp_plan = {"model.layers.*.self_attn.q_proj": "colwise"}
self.assertEqual(model.tp_plan, {"model.layers.*.self_attn.q_proj": "colwise"})
@require_torch_multi_accelerator
class TestTensorParallelAccelerator(TestTensorParallel):
nproc_per_node = backend_device_count(torch_device)
| transformers/tests/tensor_parallel/test_tensor_parallel.py/0 | {
"file_path": "transformers/tests/tensor_parallel/test_tensor_parallel.py",
"repo_id": "transformers",
"token_count": 6028
} | 624 |
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import json
import os
import shutil
import tempfile
import unittest
from transformers import AutoTokenizer, LlamaTokenizerFast, PreTrainedTokenizerFast
from transformers.testing_utils import require_tokenizers
from ..test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase):
rust_tokenizer_class = PreTrainedTokenizerFast
test_slow_tokenizer = False
test_rust_tokenizer = True
from_pretrained_vocab_key = "tokenizer_file"
@classmethod
def setUpClass(cls):
cls.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map
super().setUpClass()
cls.test_rust_tokenizer = True
model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"]
cls.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe"
# Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment)
cls.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths]
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0])
tokenizer.save_pretrained(cls.tmpdirname)
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_tokenizer_mismatch_warning(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_encode_decode_with_spaces(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_added_tokens_serialization(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_additional_special_tokens_serialization(self):
pass
@unittest.skip(reason="PreTrainedTokenizerFast is the only tokenizer that is not linked to any model")
def test_prepare_for_model(self):
pass
@unittest.skip(reason="PreTrainedTokenizerFast doesn't have tokenizer_file in its signature")
def test_rust_tokenizer_signature(self):
pass
def test_training_new_tokenizer(self):
tmpdirname_orig = self.tmpdirname
# Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
try:
self.tmpdirname = tempfile.mkdtemp()
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer.save_pretrained(self.tmpdirname)
super().test_training_new_tokenizer()
finally:
# Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer
# is restored
shutil.rmtree(self.tmpdirname)
self.tmpdirname = tmpdirname_orig
def test_training_new_tokenizer_with_special_tokens_change(self):
tmpdirname_orig = self.tmpdirname
# Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
try:
self.tmpdirname = tempfile.mkdtemp()
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer.save_pretrained(self.tmpdirname)
super().test_training_new_tokenizer_with_special_tokens_change()
finally:
# Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer
# is restored
shutil.rmtree(self.tmpdirname)
self.tmpdirname = tmpdirname_orig
def test_training_new_tokenizer_with_bytelevel(self):
tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name)
toy_text_iterator = ("a" for _ in range(1000))
new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
encoding_ids = new_tokenizer.encode("a🤗")
self.assertEqual(encoding_ids, [64, 172, 253, 97, 245])
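# train_new_from_iterator retrains the underlying tokenizers model from scratch on the
# iterator while keeping the rest of the pipeline (normalizer, pre-tokenizer, ...).
# With a byte-level BPE trained only on "a", the emoji above falls back to its 4 UTF-8
# byte tokens, hence the 5-id encoding checked here.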
def test_init_from_tokenizers_model(self):
from tokenizers import Tokenizer
sentences = ["Hello, y'all!", "How are you 😁 ? There should not be any issue right?"]
tokenizer = Tokenizer.from_pretrained("google-t5/t5-base")
# Enable padding
tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8)
self.assertEqual(
tokenizer.padding,
{
"length": 512,
"pad_to_multiple_of": 8,
"pad_id": 0,
"pad_token": "<pad>",
"pad_type_id": 0,
"direction": "right",
},
)
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
tmpdirname = tempfile.mkdtemp()
fast_tokenizer.save_pretrained(tmpdirname)
fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname)
for tok in [fast_tokenizer, fast_from_saved]:
self.assertEqual(tok.pad_token_id, 0)
self.assertEqual(tok.padding_side, "right")
self.assertEqual(tok.pad_token, "<pad>")
self.assertEqual(tok.init_kwargs["max_length"], 512)
self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8)
self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip
tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right")
self.assertEqual(
tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"}
)
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
tmpdirname = tempfile.mkdtemp()
fast_tokenizer.save_pretrained(tmpdirname)
fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname)
for tok in [fast_tokenizer, fast_from_saved]:
self.assertEqual(tok.truncation_side, "right")
self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first")
self.assertEqual(tok.init_kwargs["max_length"], 8)
self.assertEqual(tok.init_kwargs["stride"], 0)
# NOTE even if the model has a default max_length, it is not used...
# thus tok(sentences, truncation = True) does nothing and does not warn either
self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip
def test_class_after_save_and_reload(self):
# Model contains a `LlamaTokenizerFast` tokenizer with no slow fallback
model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
with tempfile.TemporaryDirectory() as temp_dir:
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer(use_fast=True) type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
# Fast tokenizer will ignore `use_fast=False`
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type(use_fast=False): `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
# Save tokenizer
tokenizer.save_pretrained(temp_dir)
tokenizer = AutoTokenizer.from_pretrained(temp_dir, use_fast=False)
# Verify post save and reload the fast tokenizer class did not change
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
tokenizer = AutoTokenizer.from_pretrained(temp_dir, use_fast=True)
# Verify post save and reload the fast tokenizer class did not change
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
@require_tokenizers
class TokenizerVersioningTest(unittest.TestCase):
def test_local_versioning(self):
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
# Hack to save this in the tokenizer_config.json
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w"))
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer) + 1)
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old tokenizer file as the version of Transformers is < 42.0.0
shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json"))
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer))
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
def test_repo_versioning(self):
# This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower.
repo = "hf-internal-testing/test-two-tokenizers"
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
tokenizer = AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(tokenizer), 28997)
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
old_transformers.tokenization_utils_base.__version__ = "3.0.0"
old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(old_tokenizer), 28996)
json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
@require_tokenizers
class ReduceMutableBorrowTests(unittest.TestCase):
def test_async_share_tokenizer(self):
# See https://github.com/huggingface/transformers/pull/12550
# and https://github.com/huggingface/tokenizers/issues/537
tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel")
text = "The Matrix is a 1999 science fiction action film."
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)]
return_value = [future.result() for future in futures]
self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)])
def fetch(self, tokenizer, text):
return tokenizer.encode(text, truncation="longest_first", padding="longest")
| transformers/tests/tokenization/test_tokenization_fast.py/0 | {
"file_path": "transformers/tests/tokenization/test_tokenization_fast.py",
"repo_id": "transformers",
"token_count": 5894
} | 625 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
tmp_file_path = os.path.join(tmp_path, "test_file.py")
with open(tmp_file_path, "w") as _tmp_file:
_tmp_file.write(case)
parsed_imports = get_imports(tmp_file_path)
assert parsed_imports == ["os"]
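# Illustrative usage sketch (not part of the original test file): the cases above pin down the
# contract of `get_imports` -- imports guarded by try/except are treated as optional dependencies
# and skipped, so a file containing TRY_IMPORT_IN_FUNCTION only reports its top-level import.
# The path below is hypothetical:
#     >>> with open("/tmp/example.py", "w") as f:
#     ...     _ = f.write(TRY_IMPORT_IN_FUNCTION)
#     >>> get_imports("/tmp/example.py")
#     ['os']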
| transformers/tests/utils/test_dynamic_module_utils.py/0 | {
"file_path": "transformers/tests/utils/test_dynamic_module_utils.py",
"repo_id": "transformers",
"token_count": 918
} | 626 |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
from transformers import LlamaConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
if is_torch_available():
import torch
from transformers import ROPE_INIT_FUNCTIONS
from transformers.modeling_rope_utils import rope_config_validation
@require_torch
class RopeTest(unittest.TestCase):
def test_rope_validation(self):
config = LlamaConfig()
all_rope_types = ROPE_INIT_FUNCTIONS.keys()
# The base config is always valid (default RoPE)
rope_config_validation(config)
# If we explicitly set the other RoPE types, then validation should fail
for rope_type in all_rope_types:
if rope_type != "default":
config.rope_scaling = {"rope_type": rope_type}
with self.assertRaises(KeyError):
rope_config_validation(config)
# Parameters are exclusive to their own RoPE type, and should raise an exception if incorrectly passed
valid_param_mapping = {
"factor": ["linear", "dynamic", "yarn", "longrope"],
"attention_factor": ["yarn", "longrope"],
"beta_fast": ["yarn"],
"beta_slow": ["yarn"],
"short_factor": ["longrope"],
"long_factor": ["longrope"],
}
for rope_type in all_rope_types:
if rope_type == "default":
continue # checked above
for param, valid_rope_types in valid_param_mapping.items():
# Set `param` with a dummy value -- we want to test the dict key
config.rope_scaling = {"rope_type": rope_type, param: True}
if rope_type in valid_rope_types:
continue
else:
with self.assertRaises(KeyError):
rope_config_validation(config)
# Any other parameters passed to RoPE will raise a warning that a particular key is not used
# But sometimes we can have model-specific RoPE kwargs and bypass warning with `ignore_keys`
model_specific_kwarg = "mrope_sections"  # e.g. in Qwen2-VL
for rope_type in all_rope_types:
if rope_type == "default":
config.rope_scaling = {"rope_type": rope_type, model_specific_kwarg: True}
rope_config_validation(config, ignore_keys={model_specific_kwarg})
with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs:
rope_config_validation(config)
self.assertEqual(len(logs.output), 1)
self.assertIn(model_specific_kwarg, logs.output[0])
def test_default_rope_numerically(self):
# Note: some RoPE scaling methods start off by calling the default RoPE frequencies. If this test fails, then
# multiple RoPE strategies will fail.
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02,
7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02,
3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02,
1.3335e-02, 1.1548e-02, 1.0000e-02, 8.6596e-03, 7.4989e-03, 6.4938e-03,
5.6234e-03, 4.8697e-03, 4.2170e-03, 3.6517e-03, 3.1623e-03, 2.7384e-03,
2.3714e-03, 2.0535e-03, 1.7783e-03, 1.5399e-03, 1.3335e-03, 1.1548e-03,
1.0000e-03, 8.6596e-04, 7.4989e-04, 6.4938e-04, 5.6234e-04, 4.8697e-04,
4.2170e-04, 3.6517e-04, 3.1623e-04, 2.7384e-04, 2.3714e-04, 2.0535e-04,
1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_scaling, None)
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertEqual(config.rope_theta, 10000.0)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = ROPE_INIT_FUNCTIONS["default"]
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for default RoPE
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
def test_linear_rope_numerically(self):
# This is a linear scaling strategy, the **frequencies** are scaled linearly with respect to the default
# frequencies (= the inverse frequencies are scaled **inversely**)
config = LlamaConfig()
default_rope_fn = ROPE_INIT_FUNCTIONS["default"]
default_inv_freq, _ = default_rope_fn(config=config, device=torch_device)
rope_fn = ROPE_INIT_FUNCTIONS["linear"]
for factor in (2.0, 10.0, 20.0):
config.rope_scaling = {"rope_type": "linear", "factor": factor}
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for linear RoPE
torch.testing.assert_close(inv_freq, default_inv_freq / factor)
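# Worked example (illustrative comment, not part of the original file): with the default
# rope_theta=10000 the first inverse frequencies are 1.0, ~0.86596, ~0.74989, ... and with
# {"rope_type": "linear", "factor": 2.0} they become 0.5, ~0.43298, ~0.37495, ..., i.e. the
# rotary phase advances half as fast per position.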
def test_dynamic_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.0931e-01, 6.5498e-01, 5.3008e-01, 4.2900e-01, 3.4720e-01,
2.8099e-01, 2.2741e-01, 1.8404e-01, 1.4895e-01, 1.2055e-01, 9.7558e-02,
7.8955e-02, 6.3899e-02, 5.1714e-02, 4.1853e-02, 3.3872e-02, 2.7413e-02,
2.2185e-02, 1.7955e-02, 1.4531e-02, 1.1760e-02, 9.5176e-03, 7.7027e-03,
6.2339e-03, 5.0451e-03, 4.0831e-03, 3.3045e-03, 2.6744e-03, 2.1644e-03,
1.7517e-03, 1.4176e-03, 1.1473e-03, 9.2852e-04, 7.5146e-04, 6.0817e-04,
4.9220e-04, 3.9834e-04, 3.2238e-04, 2.6091e-04, 2.1115e-04, 1.7089e-04,
1.3830e-04, 1.1193e-04, 9.0585e-05, 7.3312e-05, 5.9332e-05, 4.8018e-05,
3.8861e-05, 3.1451e-05, 2.5453e-05, 2.0600e-05, 1.6672e-05, 1.3492e-05,
1.0920e-05, 8.8374e-06, 7.1522e-06, 5.7883e-06, 4.6845e-06, 3.7912e-06,
3.0683e-06, 2.4832e-06, 2.0097e-06, 1.6265e-06
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_scaling, None)
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertEqual(config.rope_theta, 10000.0)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = ROPE_INIT_FUNCTIONS["default"]
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: this is a dynamic scaling strategy, it will not scale unless we provide `seq_len` larger than the
# model's original training sequence length
rope_fn = ROPE_INIT_FUNCTIONS["dynamic"]
for factor in (2.0, 10.0, 20.0):
config.rope_scaling = {"rope_type": "dynamic", "factor": factor}
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for dynamic RoPE
torch.testing.assert_close(inv_freq, default_inv_freq)
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=1)
torch.testing.assert_close(inv_freq, default_inv_freq)
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=torch.tensor(1, dtype=torch.int64))
torch.testing.assert_close(inv_freq, default_inv_freq)
# Check 2: if we provide `seq_len` larger than the model's original training sequence length, the frequencies
# will scale up (i.e., the inverse frequencies will scale down).
factor = 10.0
config.rope_scaling = {"rope_type": "dynamic", "factor": factor}
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=16384)
with self.assertRaises(AssertionError): # It is NOT a linear factor
torch.testing.assert_close(inv_freq, default_inv_freq / factor)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
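# For reference (illustrative comment, not part of the original file): dynamic NTK scaling is
# commonly implemented by growing the RoPE base with the sequence length, roughly:
#     base = rope_theta * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))
#     inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
# which explains why the frequencies only deviate from the defaults once `seq_len` exceeds the
# model's original training length.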
def test_yarn_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.3479e-02,
6.9590e-02, 5.7925e-02, 4.8136e-02, 3.9931e-02, 3.3061e-02, 2.7315e-02,
2.2515e-02, 1.8512e-02, 1.5177e-02, 1.2403e-02, 1.0101e-02, 8.1924e-03,
6.6143e-03, 5.3120e-03, 4.2400e-03, 3.3599e-03, 2.6396e-03, 2.0520e-03,
1.5746e-03, 1.1882e-03, 8.7713e-04, 6.2810e-04, 4.3007e-04, 2.7384e-04,
2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04,
1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05,
4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05,
1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_scaling, None)
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertEqual(config.rope_theta, 10000.0)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = ROPE_INIT_FUNCTIONS["default"]
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default --
# `0.1 * math.log(factor) + 1.0`
rope_fn = ROPE_INIT_FUNCTIONS["yarn"]
for factor in (2.0, 10.0, 20.0):
config.rope_scaling = {"rope_type": "yarn", "factor": factor}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 0.1 * math.log(factor) + 1.0)
config.rope_scaling = {"rope_type": "yarn", "factor": factor, "attention_factor": 0.5}
_, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1)
self.assertEqual(attention_scale, 0.5)
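# Worked example (illustrative comment, not part of the original file): for factor=10.0 the
# default attention scale is 0.1 * math.log(10.0) + 1.0 = 0.1 * 2.302585... + 1.0 ≈ 1.2303.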
# Check 2: based on `beta_fast` and `beta_slow`, the frequencies will be scaled between 1 and `factor`.
# Increasing `beta_fast` makes RoPE more interpolative (i.e. applies more scaling), and decreasing it does the
# opposite. `beta_slow` behaves the other way around. Remember: `beta_fast` > `beta_slow`
# (note: adds a margin to the test for numerical stability)
factor = 10.0
margin = 1e-8
config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 32, "beta_slow": 1}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_bounded_by_factor = [
((default_inv_freq[idx] / factor) - margin) <= yarn_inv_freq_value <= (default_inv_freq[idx] + margin)
for idx, yarn_inv_freq_value in enumerate(inv_freq)
]
self.assertTrue(all(is_bounded_by_factor))
# A super high `beta_fast` means interpolation (i.e. scaling) in all but the first inverse frequency. The last
# ~20 values (empirically checked for `beta_fast` = 1000) should be very close to plain linear scaling.
config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 1000, "beta_slow": 1}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_interpolating = [
yarn_inv_freq_value < (default_inv_freq[idx] + margin) for idx, yarn_inv_freq_value in enumerate(inv_freq)
]
self.assertFalse(is_interpolating[0])
self.assertTrue(all(is_interpolating[1:]))
torch.testing.assert_close(inv_freq[-20:], default_inv_freq[-20:] / factor)
# Check 3: numerical snapshot to avoid regressions
config.rope_scaling = {"rope_type": "yarn", "factor": factor, "beta_fast": 32, "beta_slow": 1}
inv_freq, _ = rope_fn(config=config, device=torch_device)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
def test_longrope_rope_numerically(self):
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_scaling, None)
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertEqual(config.rope_theta, 10000.0)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
# longrope applies scaling on EACH inv frequency, `short_factor` or `long_factor`, depending on the seq_len
dim = config.hidden_size // config.num_attention_heads
short_factor = [2.0] * (dim // 2) # scaling applied when seq_len <= max_position_embeddings
long_factor = torch.ones(dim // 2).cumsum(0).tolist() # scaling applied when seq_len > max_position_embeddings
rope_fn = ROPE_INIT_FUNCTIONS["default"]
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default --
# `math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings))`
rope_fn = ROPE_INIT_FUNCTIONS["longrope"]
max_position_embeddings = config.max_position_embeddings
for factor in (2.0, 10.0, 20.0):
config.rope_scaling = {
"rope_type": "longrope",
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings)))
config.rope_scaling = {
"rope_type": "longrope",
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
"attention_factor": 0.5,
}
_, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1)
self.assertEqual(attention_scale, 0.5)
config.rope_scaling = {
"rope_type": "longrope",
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
}
self.assertEqual(config.rope_scaling.get("attention_factor"), None)
# Verify that "TypeError: '<' not supported between instances of 'NoneType' and 'int'" is not raised.
rope_config_validation(config)
# Check 2: seq_len == 0 -> short factor is applied to the default frequencies
config.rope_scaling = {
"rope_type": "longrope",
"factor": 1.0,
"short_factor": short_factor,
"long_factor": long_factor,
}
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=0)
torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(short_factor).to(torch_device))
# Check 3: seq_len > max_position_embeddings -> long factor is applied to the default frequencies
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=config.max_position_embeddings + 1)
torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(long_factor).to(torch_device))
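# Worked example (illustrative comment, not part of the original file): LlamaConfig defaults to
# max_position_embeddings=2048, so for factor=2.0 the default attention scale checked above is
# math.sqrt(1 + math.log(2.0) / math.log(2048)) = math.sqrt(1 + 1/11) ≈ 1.0445. The short/long
# factors themselves simply divide the default inverse frequencies elementwise.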
def test_llama3_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02,
7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02,
3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02,
1.3335e-02, 1.0730e-02, 7.7785e-03, 5.6009e-03, 3.9991e-03, 2.8248e-03,
1.9675e-03, 1.3449e-03, 8.9549e-04, 5.7363e-04, 3.4539e-04, 2.7384e-04,
2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04,
1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05,
4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05,
1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_scaling, None)
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertEqual(config.rope_theta, 10000.0)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = ROPE_INIT_FUNCTIONS["default"]
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: `attention_factor` is always 1
rope_fn = ROPE_INIT_FUNCTIONS["llama3"]
for factor in (2.0, 10.0, 20.0):
config.rope_scaling = {
"rope_type": "llama3",
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0)
# Check 2: based on `low_freq_factor` and `high_freq_factor`, the frequencies will be scaled between 1 and
# `factor` (similar to yarn). Low frequencies get scaled by `factor`, high frequencies see no change, medium
# frequencies are scaled by a value in between. Changing `low_freq_factor` and `high_freq_factor` changes what
# is considered low, medium, and high frequencies.
factor = 10.0
config.rope_scaling = {
"rope_type": "llama3",
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_bounded_by_factor = [
(default_inv_freq[idx] / factor) <= llama3_inv_freq_value <= default_inv_freq[idx]
for idx, llama3_inv_freq_value in enumerate(inv_freq)
]
self.assertTrue(all(is_bounded_by_factor))
# if we change `high_freq_factor` to a very high value, none is considered high-frequency -> ALL values will be
# scaled
config.rope_scaling = {
"rope_type": "llama3",
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 1000,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_scaled = [yarn_inv_freq_value < default_inv_freq[idx] for idx, yarn_inv_freq_value in enumerate(inv_freq)]
self.assertTrue(all(is_scaled))
# Check 3: numerical snapshot to avoid regressions
config.rope_scaling = {
"rope_type": "llama3",
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
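# For reference (illustrative comment, not part of the original file): llama3 scaling operates on
# wavelengths (wavelen = 2 * math.pi / inv_freq). Roughly:
#     low_freq_wavelen = original_max_position_embeddings / low_freq_factor
#     high_freq_wavelen = original_max_position_embeddings / high_freq_factor
#     wavelen < high_freq_wavelen -> inverse frequency kept as is
#     wavelen > low_freq_wavelen  -> inverse frequency divided by `factor`
#     otherwise                   -> smooth interpolation between the two regimes
# which is why every scaled value stays between default_inv_freq / factor and default_inv_freq.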
| transformers/tests/utils/test_modeling_rope_utils.py/0 | {
"file_path": "transformers/tests/utils/test_modeling_rope_utils.py",
"repo_id": "transformers",
"token_count": 10355
} | 627 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that checks that the docstrings of all public objects have an argument section matching their signature.
Use from the root of the repo with:
```bash
python utils/check_docstrings.py
```
for a check that will error in case of inconsistencies (used by `make repo-consistency`).
To auto-fix issues run:
```bash
python utils/check_docstrings.py --fix_and_overwrite
```
which is used by `make fix-copies` (note that this fills in what it can; you might have to manually fill in
information like argument descriptions).
"""
import argparse
import ast
import enum
import glob
import inspect
import operator as op
import os
import re
from collections import OrderedDict
from pathlib import Path
from typing import Any, Optional, Union
from check_repo import ignore_undocumented
from git import Repo
from transformers.utils import direct_transformers_import
from transformers.utils.auto_docstring import (
ImageProcessorArgs,
ModelArgs,
ModelOutputArgs,
get_args_doc_from_source,
parse_docstring,
set_min_indent,
)
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_TRANSFORMERS = Path("src").resolve() / "transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
OPTIONAL_KEYWORD = "*optional*"
# Re pattern that catches args blocks in docstrings (with all variation around the name supported).
_re_args = re.compile(r"^\s*(Args?|Arguments?|Attributes?|Params?|Parameters?):\s*$")
# Re pattern that parses the start of an arg block: catches <name> (<description>) in those lines.
_re_parse_arg = re.compile(r"^(\s*)(\S+)\s+\((.+)\)(?:\:|$)")
# Re pattern that parses the end of a description of an arg (catches the default in *optional*, defaults to xxx).
_re_parse_description = re.compile(r"\*optional\*, defaults to (.*)$")
# Args that are always overridden in the docstring, for clarity we don't want to remove them from the docstring
ALWAYS_OVERRIDE = ["labels"]
# This is a temporary list of objects to ignore while we progressively fix them. Do not add anything here, fix the
# docstrings instead. If formatting should be ignored for the docstring, you can put a comment # no-format on the
# line before the docstring.
OBJECTS_TO_IGNORE = [
"Mxfp4Config",
"Exaone4Config",
"SmolLM3Config",
"Gemma3nVisionConfig",
"Llama4Processor",
# Deprecated
"InputExample",
"InputFeatures",
# Signature is *args/**kwargs
"TFSequenceSummary",
"TFBertTokenizer",
"TFGPT2Tokenizer",
# Missing arguments in the docstring
"ASTFeatureExtractor",
"AlbertModel",
"AlbertTokenizerFast",
"AlignTextModel",
"AlignVisionConfig",
"AudioClassificationPipeline",
"AutoformerConfig",
"AutomaticSpeechRecognitionPipeline",
"BarkCoarseConfig",
"BarkConfig",
"BarkFineConfig",
"BarkSemanticConfig",
"BartConfig",
"BartTokenizerFast",
"BarthezTokenizerFast",
"BeitModel",
"BertConfig",
"BertJapaneseTokenizer",
"BertModel",
"BertTokenizerFast",
"BigBirdConfig",
"BigBirdForQuestionAnswering",
"BigBirdModel",
"BigBirdPegasusConfig",
"BigBirdTokenizerFast",
"BitImageProcessor",
"BlenderbotConfig",
"BlenderbotSmallConfig",
"BlenderbotSmallTokenizerFast",
"BlenderbotTokenizerFast",
"Blip2VisionConfig",
"BlipTextConfig",
"BlipVisionConfig",
"BloomConfig",
"BloomTokenizerFast",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
"BrosModel",
"CamembertConfig",
"CamembertModel",
"CamembertTokenizerFast",
"CanineModel",
"CanineTokenizer",
"ChineseCLIPTextModel",
"ClapTextConfig",
"ConditionalDetrConfig",
"ConditionalDetrImageProcessor",
"ConvBertConfig",
"ConvBertTokenizerFast",
"ConvNextConfig",
"ConvNextV2Config",
"CpmAntTokenizer",
"CvtConfig",
"CvtModel",
"DeiTImageProcessor",
"DPRReaderTokenizer",
"DPRReaderTokenizerFast",
"DPTModel",
"Data2VecAudioConfig",
"Data2VecTextConfig",
"Data2VecTextModel",
"Data2VecVisionModel",
"DataCollatorForLanguageModeling",
"DebertaConfig",
"DebertaV2Config",
"DebertaV2Tokenizer",
"DebertaV2TokenizerFast",
"DecisionTransformerConfig",
"DeformableDetrConfig",
"DeformableDetrImageProcessor",
"DeiTModel",
"DepthEstimationPipeline",
"DetaConfig",
"DetaImageProcessor",
"DetrConfig",
"DetrImageProcessor",
"DinatModel",
"DINOv3ConvNextConfig",
"DINOv3ViTConfig",
"DistilBertConfig",
"DistilBertTokenizerFast",
"DocumentQuestionAnsweringPipeline",
"DonutSwinModel",
"EarlyStoppingCallback",
"EfficientFormerConfig",
"EfficientFormerImageProcessor",
"EfficientNetConfig",
"ElectraConfig",
"ElectraTokenizerFast",
"EncoderDecoderModel",
"ErnieMModel",
"ErnieModel",
"ErnieMTokenizer",
"EsmConfig",
"EsmModel",
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxBartForCausalLM",
"FlaxBartForConditionalGeneration",
"FlaxBartForQuestionAnswering",
"FlaxBartForSequenceClassification",
"FlaxBartModel",
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBigBirdForCausalLM",
"FlaxBigBirdForMaskedLM",
"FlaxBigBirdForMultipleChoice",
"FlaxBigBirdForPreTraining",
"FlaxBigBirdForQuestionAnswering",
"FlaxBigBirdForSequenceClassification",
"FlaxBigBirdForTokenClassification",
"FlaxBigBirdModel",
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBloomForCausalLM",
"FlaxBloomModel",
"FlaxCLIPModel",
"FlaxDinov2ForImageClassification",
"FlaxDinov2Model",
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxEncoderDecoderModel",
"FlaxGPT2LMHeadModel",
"FlaxGPT2Model",
"FlaxGPTJForCausalLM",
"FlaxGPTJModel",
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxLlamaForCausalLM",
"FlaxLlamaModel",
"FlaxGemmaForCausalLM",
"FlaxGemmaModel",
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMarianMTModel",
"FlaxMarianModel",
"FlaxMistralForCausalLM",
"FlaxMistralModel",
"FlaxOPTForCausalLM",
"FlaxPegasusForConditionalGeneration",
"FlaxPegasusModel",
"FlaxRegNetForImageClassification",
"FlaxRegNetModel",
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxSpeechEncoderDecoderModel",
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxVisionEncoderDecoderModel",
"FlaxVisionTextDualEncoderModel",
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWhisperForAudioClassification",
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperTimeStampLogitsProcessor",
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FNetConfig",
"FNetModel",
"FNetTokenizerFast",
"FSMTConfig",
"FeatureExtractionPipeline",
"FillMaskPipeline",
"FlaubertConfig",
"FlavaConfig",
"FlavaForPreTraining",
"FlavaImageModel",
"FlavaImageProcessor",
"FlavaMultimodalModel",
"FlavaTextConfig",
"FlavaTextModel",
"FocalNetModel",
"FunnelTokenizerFast",
"GPTBigCodeConfig",
"GPTJConfig",
"GPTNeoXConfig",
"GPTNeoXJapaneseConfig",
"GPTNeoXTokenizerFast",
"GPTSanJapaneseConfig",
"GitConfig",
"GitVisionConfig",
"GraphormerConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
"HerbertTokenizerFast",
"HubertConfig",
"HubertForCTC",
"IBertConfig",
"IBertModel",
"IdeficsConfig",
"IdeficsProcessor",
"IJepaModel",
"ImageClassificationPipeline",
"ImageFeatureExtractionPipeline",
"ImageGPTConfig",
"ImageSegmentationPipeline",
"ImageTextToTextPipeline",
"ImageToImagePipeline",
"ImageToTextPipeline",
"InformerConfig",
"JukeboxPriorConfig",
"JukeboxTokenizer",
"LEDConfig",
"LEDTokenizerFast",
"LayoutLMForQuestionAnswering",
"LayoutLMTokenizerFast",
"LayoutLMv2Config",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2TokenizerFast",
"LayoutLMv3Config",
"LayoutLMv3ImageProcessor",
"LayoutLMv3TokenizerFast",
"LayoutXLMTokenizerFast",
"LevitConfig",
"LiltConfig",
"LiltModel",
"LongT5Config",
"LongformerConfig",
"LongformerModel",
"LongformerTokenizerFast",
"LukeModel",
"LukeTokenizer",
"LxmertTokenizerFast",
"M2M100Config",
"M2M100Tokenizer",
"MarkupLMProcessor",
"MaskGenerationPipeline",
"MBart50TokenizerFast",
"MBartConfig",
"MCTCTFeatureExtractor",
"MPNetConfig",
"MPNetModel",
"MPNetTokenizerFast",
"MT5Config",
"MT5TokenizerFast",
"MarianConfig",
"MarianTokenizer",
"MarkupLMConfig",
"MarkupLMModel",
"MarkupLMTokenizer",
"MarkupLMTokenizerFast",
"Mask2FormerConfig",
"MaskFormerConfig",
"MaxTimeCriteria",
"MegaConfig",
"MegaModel",
"MegatronBertConfig",
"MegatronBertForPreTraining",
"MegatronBertModel",
"MLCDVisionConfig",
"MobileBertConfig",
"MobileBertModel",
"MobileBertTokenizerFast",
"MobileNetV1ImageProcessor",
"MobileNetV1Model",
"MobileNetV2ImageProcessor",
"MobileNetV2Model",
"MobileViTModel",
"MobileViTV2Model",
"MLukeTokenizer",
"MraConfig",
"MusicgenDecoderConfig",
"MusicgenForConditionalGeneration",
"MusicgenMelodyForConditionalGeneration",
"MvpConfig",
"MvpTokenizerFast",
"MT5Tokenizer",
"NatModel",
"NerPipeline",
"NezhaConfig",
"NezhaModel",
"NllbMoeConfig",
"NllbTokenizer",
"NllbTokenizerFast",
"NystromformerConfig",
"OPTConfig",
"ObjectDetectionPipeline",
"OneFormerProcessor",
"OpenAIGPTTokenizerFast",
"OpenLlamaConfig",
"PLBartConfig",
"PegasusConfig",
"PegasusTokenizer",
"PegasusTokenizerFast",
"PegasusXConfig",
"PerceiverImageProcessor",
"PerceiverModel",
"PerceiverTokenizer",
"PersimmonConfig",
"Pipeline",
"Pix2StructConfig",
"Pix2StructTextConfig",
"PLBartTokenizer",
"Pop2PianoConfig",
"PreTrainedTokenizer",
"PreTrainedTokenizerBase",
"PreTrainedTokenizerFast",
"PrefixConstrainedLogitsProcessor",
"ProphetNetConfig",
"QDQBertConfig",
"QDQBertModel",
"QuestionAnsweringPipeline",
"RagConfig",
"RagModel",
"RagRetriever",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"RealmConfig",
"RealmForOpenQA",
"RealmScorer",
"RealmTokenizerFast",
"ReformerConfig",
"ReformerTokenizerFast",
"RegNetConfig",
"RemBertConfig",
"RemBertModel",
"RemBertTokenizer",
"RemBertTokenizerFast",
"RetriBertConfig",
"RetriBertTokenizerFast",
"RoCBertConfig",
"RoCBertModel",
"RoCBertTokenizer",
"RoFormerConfig",
"RobertaConfig",
"RobertaModel",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormModel",
"RobertaTokenizerFast",
"SEWConfig",
"SEWDConfig",
"SEWDForCTC",
"SEWForCTC",
"SamConfig",
"SamPromptEncoderConfig",
"SamHQConfig",
"SamHQPromptEncoderConfig",
"SeamlessM4TConfig", # use of unconventional markdown
"SeamlessM4Tv2Config", # use of unconventional markdown
"Seq2SeqTrainingArguments",
"SpecialTokensMixin",
"Speech2Text2Config",
"Speech2Text2Tokenizer",
"Speech2TextTokenizer",
"SpeechEncoderDecoderModel",
"SpeechT5Config",
"SpeechT5Model",
"SplinterConfig",
"SplinterTokenizerFast",
"SqueezeBertTokenizerFast",
"SummarizationPipeline",
"Swin2SRImageProcessor",
"Swinv2Model",
"SwitchTransformersConfig",
"T5Config",
"T5Tokenizer",
"T5TokenizerFast",
"TableQuestionAnsweringPipeline",
"TableTransformerConfig",
"TapasConfig",
"TapasModel",
"TapasTokenizer",
"Text2TextGenerationPipeline",
"TextClassificationPipeline",
"TextGenerationPipeline",
"TFBartForConditionalGeneration",
"TFBartForSequenceClassification",
"TFBartModel",
"TFBertModel",
"TFConvNextModel",
"TFData2VecVisionModel",
"TFDeiTModel",
"TFEncoderDecoderModel",
"TFEsmModel",
"TFMobileViTModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"TFRepetitionPenaltyLogitsProcessor",
"TFSwinModel",
"TFViTModel",
"TFVisionEncoderDecoderModel",
"TFVisionTextDualEncoderModel",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TimeSeriesTransformerConfig",
"TokenClassificationPipeline",
"TrOCRConfig",
"Phi4MultimodalProcessor",
"TrainerState",
"TrainingArguments",
"TrajectoryTransformerConfig",
"TranslationPipeline",
"TvltImageProcessor",
"UMT5Config",
"UperNetConfig",
"UperNetForSemanticSegmentation",
"ViTHybridImageProcessor",
"ViTHybridModel",
"ViTMSNModel",
"ViTModel",
"VideoClassificationPipeline",
"ViltConfig",
"ViltForImagesAndTextClassification",
"ViltModel",
"VisionEncoderDecoderModel",
"VisionTextDualEncoderModel",
"VisualBertConfig",
"VisualBertModel",
"VisualQuestionAnsweringPipeline",
"VitMatteForImageMatting",
"VitsTokenizer",
"VivitModel",
"Wav2Vec2BertForCTC",
"Wav2Vec2CTCTokenizer",
"Wav2Vec2Config",
"Wav2Vec2ConformerConfig",
"Wav2Vec2ConformerForCTC",
"Wav2Vec2FeatureExtractor",
"Wav2Vec2PhonemeCTCTokenizer",
"WavLMConfig",
"WavLMForCTC",
"WhisperConfig",
"WhisperFeatureExtractor",
"WhisperForAudioClassification",
"XCLIPTextConfig",
"XCLIPVisionConfig",
"XGLMConfig",
"XGLMModel",
"XGLMTokenizerFast",
"XLMConfig",
"XLMProphetNetConfig",
"XLMRobertaConfig",
"XLMRobertaModel",
"XLMRobertaTokenizerFast",
"XLMRobertaXLConfig",
"XLMRobertaXLModel",
"XLNetConfig",
"XLNetTokenizerFast",
"XmodConfig",
"XmodModel",
"YolosImageProcessor",
"YolosModel",
"YosoConfig",
"ZeroShotAudioClassificationPipeline",
"ZeroShotClassificationPipeline",
"ZeroShotImageClassificationPipeline",
"ZeroShotObjectDetectionPipeline",
"Llama4TextConfig",
]
# Supported math operations when interpreting the value of defaults.
MATH_OPERATORS = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Pow: op.pow,
ast.BitXor: op.xor,
ast.USub: op.neg,
}
def find_indent(line: str) -> int:
"""
Returns the number of spaces that start a line indent.
"""
search = re.search(r"^(\s*)(?:\S|$)", line)
if search is None:
return 0
return len(search.groups()[0])
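# Example (illustrative, not part of the original file):
#     >>> find_indent("        hidden_size (`int`):")
#     8
#     >>> find_indent("")
#     0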
def stringify_default(default: Any) -> str:
"""
Returns the string representation of a default value, as used in docstrings: numbers are left as-is, all other
objects are wrapped in backticks.
Args:
default (`Any`): The default value to process
Returns:
`str`: The string representation of that default.
"""
if isinstance(default, bool):
# We need to test for bool first as a bool passes isinstance(xxx, (int, float))
return f"`{default}`"
elif isinstance(default, enum.Enum):
# We need to test for enum first as an enum with int values will pass isinstance(xxx, (int, float))
return f"`{str(default)}`"
elif isinstance(default, int):
return str(default)
elif isinstance(default, float):
result = str(default)
return str(round(default, 2)) if len(result) > 6 else result
elif isinstance(default, str):
return str(default) if default.isnumeric() else f'`"{default}"`'
elif isinstance(default, type):
return f"`{default.__name__}`"
else:
return f"`{default}`"
def eval_math_expression(expression: str) -> Optional[Union[float, int]]:
# Mainly taken from the excellent https://stackoverflow.com/a/9558001
"""
Safely evaluate a mathematical expression and return its value.
Args:
expression (`str`): The expression to evaluate.
Returns:
`Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed
otherwise.
Example:
```py
>>> eval_math_expression('2^6')
4
>>> eval_math_expression('2**6')
64
>>> eval_math_expression('1 + 2*3**(4^5) / (6 + -7)')
-5.0
```
"""
try:
return eval_node(ast.parse(expression, mode="eval").body)
except TypeError:
return
def eval_node(node):
if isinstance(node, ast.Constant) and type(node.value) in (int, float, complex):
return node.value
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return MATH_OPERATORS[type(node.op)](eval_node(node.left), eval_node(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return MATH_OPERATORS[type(node.op)](eval_node(node.operand))
else:
raise TypeError(node)
def replace_default_in_arg_description(description: str, default: Any) -> str:
"""
Catches the default value in the description of an argument inside a docstring and replaces it by the value passed.
Args:
description (`str`): The description of an argument in a docstring to process.
default (`Any`): The default value that would be in the docstring of that argument.
Returns:
`str`: The description updated with the new default value.
"""
# Lots of docstrings have `optional` or **optional** instead of *optional* so we do this fix here.
description = description.replace("`optional`", OPTIONAL_KEYWORD)
description = description.replace("**optional**", OPTIONAL_KEYWORD)
if default is inspect._empty:
# No default, make sure the description doesn't have any either
idx = description.find(OPTIONAL_KEYWORD)
if idx != -1:
description = description[:idx].rstrip()
if description.endswith(","):
description = description[:-1].rstrip()
elif default is None:
# Default None are not written, we just set `*optional*`. If there is default that is not None specified in the
# description, we do not erase it (as sometimes we set the default to `None` because the default is a mutable
# object).
idx = description.find(OPTIONAL_KEYWORD)
if idx == -1:
description = f"{description}, {OPTIONAL_KEYWORD}"
elif re.search(r"defaults to `?None`?", description) is not None:
len_optional = len(OPTIONAL_KEYWORD)
description = description[: idx + len_optional]
else:
str_default = None
# For numbers we may have a default that is given by a math operation (1/255 is really popular). We don't
# want to replace those by their actual values.
if isinstance(default, (int, float)) and re.search("defaults to `?(.*?)(?:`|$)", description) is not None:
# Grab the default and evaluate it.
current_default = re.search("defaults to `?(.*?)(?:`|$)", description).groups()[0]
if default == eval_math_expression(current_default):
try:
# If it can be directly converted to the type of the default, it's a simple value
str_default = str(type(default)(current_default))
except Exception:
# Otherwise there is a math operator so we add a code block.
str_default = f"`{current_default}`"
elif isinstance(default, enum.Enum) and default.name == current_default.split(".")[-1]:
# When the default is an Enum (this is often the case for PIL.Image.Resampling), and the docstring
# matches the enum name, keep the existing docstring rather than clobbering it with the enum value.
str_default = f"`{current_default}`"
if str_default is None:
str_default = stringify_default(default)
# Make sure default match
if OPTIONAL_KEYWORD not in description:
description = f"{description}, {OPTIONAL_KEYWORD}, defaults to {str_default}"
elif _re_parse_description.search(description) is None:
idx = description.find(OPTIONAL_KEYWORD)
len_optional = len(OPTIONAL_KEYWORD)
description = f"{description[: idx + len_optional]}, defaults to {str_default}"
else:
description = _re_parse_description.sub(rf"*optional*, defaults to {str_default}", description)
return description
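# Example (illustrative, not part of the original file): if the default in the signature changed
# from 2 to 3, the stale value in the description is rewritten in place:
#     >>> replace_default_in_arg_description("`int`, *optional*, defaults to 2", 3)
#     '`int`, *optional*, defaults to 3'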
def get_default_description(arg: inspect.Parameter) -> str:
"""
Builds a default description for a parameter that was not documented.
Args:
arg (`inspect.Parameter`): The argument in the signature to generate a description for.
Returns:
`str`: The description.
"""
if arg.annotation is inspect._empty:
arg_type = "<fill_type>"
elif hasattr(arg.annotation, "__name__"):
arg_type = arg.annotation.__name__
else:
arg_type = str(arg.annotation)
if arg.default is inspect._empty:
return f"`{arg_type}`"
elif arg.default is None:
return f"`{arg_type}`, {OPTIONAL_KEYWORD}"
else:
str_default = stringify_default(arg.default)
return f"`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}"
def find_source_file(obj: Any) -> Path:
"""
Finds the source file of an object.
Args:
obj (`Any`): The object whose source file we are looking for.
Returns:
`Path`: The source file.
"""
module = obj.__module__
obj_file = PATH_TO_TRANSFORMERS
for part in module.split(".")[1:]:
obj_file = obj_file / part
return obj_file.with_suffix(".py")
def match_docstring_with_signature(obj: Any) -> Optional[tuple[str, str]]:
"""
Matches the docstring of an object with its signature.
Args:
obj (`Any`): The object to process.
Returns:
`Optional[Tuple[str, str]]`: Returns `None` if there is no docstring or no parameters documented in the
docstring, otherwise returns a tuple of two strings: the current documentation of the arguments in the
docstring and the one matched with the signature.
"""
if len(getattr(obj, "__doc__", "")) == 0:
# Nothing to do, there is no docstring.
return
# Read the docstring in the source code to see if there is a special command to ignore this object.
try:
source, _ = inspect.getsourcelines(obj)
except OSError:
source = []
# Find the line where the docstring starts
idx = 0
while idx < len(source) and '"""' not in source[idx]:
idx += 1
ignore_order = False
if idx < len(source):
line_before_docstring = source[idx - 1]
# Match '# no-format' (allowing surrounding whitespaces)
if re.search(r"^\s*#\s*no-format\s*$", line_before_docstring):
# This object is ignored by the auto-docstring tool
return
# Match '# ignore-order' (allowing surrounding whitespaces)
elif re.search(r"^\s*#\s*ignore-order\s*$", line_before_docstring):
ignore_order = True
# Read the signature
signature = inspect.signature(obj).parameters
obj_doc_lines = obj.__doc__.split("\n")
# Get to the line where we start documenting arguments
idx = 0
while idx < len(obj_doc_lines) and _re_args.search(obj_doc_lines[idx]) is None:
idx += 1
if idx == len(obj_doc_lines):
# Nothing to do, no parameters are documented.
return
if "kwargs" in signature and signature["kwargs"].annotation != inspect._empty:
# Inspecting signature with typed kwargs is not supported yet.
return
indent = find_indent(obj_doc_lines[idx])
arguments = {}
current_arg = None
idx += 1
start_idx = idx
# Keep going until the arg section is finished (nonempty line at the same indent level) or the end of the docstring.
while idx < len(obj_doc_lines) and (
len(obj_doc_lines[idx].strip()) == 0 or find_indent(obj_doc_lines[idx]) > indent
):
if find_indent(obj_doc_lines[idx]) == indent + 4:
# New argument -> let's generate the proper doc for it
re_search_arg = _re_parse_arg.search(obj_doc_lines[idx])
if re_search_arg is not None:
_, name, description = re_search_arg.groups()
current_arg = name
if name in signature:
default = signature[name].default
if signature[name].kind is inspect._ParameterKind.VAR_KEYWORD:
default = None
new_description = replace_default_in_arg_description(description, default)
else:
new_description = description
init_doc = _re_parse_arg.sub(rf"\1\2 ({new_description}):", obj_doc_lines[idx])
arguments[current_arg] = [init_doc]
elif current_arg is not None:
arguments[current_arg].append(obj_doc_lines[idx])
idx += 1
# We went too far by one (perhaps more if there are a lot of new lines)
idx -= 1
if current_arg:
while len(obj_doc_lines[idx].strip()) == 0:
arguments[current_arg] = arguments[current_arg][:-1]
idx -= 1
# And we went too far by one again.
idx += 1
old_doc_arg = "\n".join(obj_doc_lines[start_idx:idx])
old_arguments = list(arguments.keys())
arguments = {name: "\n".join(doc) for name, doc in arguments.items()}
# Add missing arguments with a template
for name in set(signature.keys()) - set(arguments.keys()):
arg = signature[name]
# We ignore private arguments or *args/**kwargs (unless they are documented by the user)
if name.startswith("_") or arg.kind in [
inspect._ParameterKind.VAR_KEYWORD,
inspect._ParameterKind.VAR_POSITIONAL,
]:
arguments[name] = ""
else:
arg_desc = get_default_description(arg)
arguments[name] = " " * (indent + 4) + f"{name} ({arg_desc}): <fill_docstring>"
# Arguments are sorted by the order in the signature unless a special comment is put.
if ignore_order:
new_param_docs = [arguments[name] for name in old_arguments if name in signature]
missing = set(signature.keys()) - set(old_arguments)
new_param_docs.extend([arguments[name] for name in missing if len(arguments[name]) > 0])
else:
new_param_docs = [arguments[name] for name in signature if len(arguments[name]) > 0]
new_doc_arg = "\n".join(new_param_docs)
return old_doc_arg, new_doc_arg
def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):
"""
Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.
Args:
obj (`Any`):
The object whose docstring we are fixing.
old_doc_args (`str`):
The current documentation of the parameters of `obj` in the docstring (as returned by
`match_docstring_with_signature`).
new_doc_args (`str`):
The documentation of the parameters of `obj` matched with its signature (as returned by
`match_docstring_with_signature`).
"""
# Read the docstring in the source code and make sure we have the right part of the docstring
source, line_number = inspect.getsourcelines(obj)
# Get to the line where we start documenting arguments
idx = 0
while idx < len(source) and _re_args.search(source[idx]) is None:
idx += 1
if idx == len(source):
# Args are not defined in the docstring of this object. This can happen when the docstring is inherited.
# In this case, we are not trying to fix it on the child object.
return
# Get to the line where we stop documenting arguments
indent = find_indent(source[idx])
idx += 1
start_idx = idx
while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):
idx += 1
idx -= 1
while len(source[idx].strip()) == 0:
idx -= 1
idx += 1
# `old_doc_args` is built from `obj.__doc__`, which may have
# different indentation than the raw source from `inspect.getsourcelines`.
# We use `inspect.cleandoc` to remove indentation uniformly from both
# strings before comparing them.
source_args_as_str = "".join(source[start_idx:idx])
if inspect.cleandoc(source_args_as_str) != inspect.cleandoc(old_doc_args):
# Args are not fully defined in the docstring of this object
obj_file = find_source_file(obj)
actual_args_section = source_args_as_str.rstrip()
raise ValueError(
f"Cannot fix docstring of {obj.__name__} in {obj_file} because the argument section in the source code "
f"does not match the expected format. This usually happens when:\n"
f"1. The argument section is not properly indented\n"
f"2. The argument section contains unexpected formatting\n"
f"3. The docstring parsing failed to correctly identify the argument boundaries\n\n"
f"Expected argument section:\n{repr(old_doc_args)}\n\n"
f"Actual argument section found:\n{repr(actual_args_section)}\n\n"
)
obj_file = find_source_file(obj)
with open(obj_file, "r", encoding="utf-8") as f:
content = f.read()
# Replace content
lines = content.split("\n")
prev_line_indentation = find_indent(lines[line_number + start_idx - 2])
# Now increase the indentation of every line in new_doc_args by prev_line_indentation
new_doc_args = "\n".join([f"{' ' * prev_line_indentation}{line}" for line in new_doc_args.split("\n")])
lines = lines[: line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1 :]
print(f"Fixing the docstring of {obj.__name__} in {obj_file}.")
with open(obj_file, "w", encoding="utf-8") as f:
f.write("\n".join(lines))
def _find_sig_line(lines, line_end):
parenthesis_count = 0
sig_line_end = line_end
found_sig = False
while not found_sig:
for char in lines[sig_line_end]:
if char == "(":
parenthesis_count += 1
elif char == ")":
parenthesis_count -= 1
if parenthesis_count == 0:
found_sig = True
break
sig_line_end += 1
return sig_line_end
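# Example (illustrative, not part of the original file): the returned index is the first line
# *after* the one holding the parenthesis that closes the signature:
#     >>> _find_sig_line(["def f(", "    a,", "    b,", "):", "    pass"], 0)
#     4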
def _find_docstring_end_line(lines, docstring_start_line):
if '"""' not in lines[docstring_start_line]:
return None
docstring_end = docstring_start_line
# If the opening line does not also contain the closing quotes, scan forward for them.
if lines[docstring_start_line].count('"""') < 2:
docstring_end += 1
while '"""' not in lines[docstring_end]:
docstring_end += 1
return docstring_end
def find_matching_model_files(check_all: bool = False):
"""
Find all model files in the transformers repo that should be checked for @auto_docstring,
excluding files with certain substrings.
Returns:
List of file paths.
"""
module_diff_files = None
if not check_all:
module_diff_files = set()
repo = Repo(PATH_TO_REPO)
# Diff from index to unstaged files
for modified_file_diff in repo.index.diff(None):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
# Diff from index to `main`
for modified_file_diff in repo.index.diff(repo.refs.main.commit):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(os.path.join(PATH_TO_REPO, modified_file_diff.a_path))
# quick escape route: if there are no module files in the diff, skip this check
if len(module_diff_files) == 0:
return None
modeling_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, "models/**/modeling_**")
potential_files = glob.glob(modeling_glob_pattern)
image_processing_glob_pattern = os.path.join(PATH_TO_TRANSFORMERS, "models/**/image_processing_*_fast.py")
potential_files += glob.glob(image_processing_glob_pattern)
exclude_substrings = ["modeling_tf_", "modeling_flax_"]
matching_files = []
for file_path in potential_files:
if os.path.isfile(file_path):
filename = os.path.basename(file_path)
is_excluded = any(exclude in filename for exclude in exclude_substrings)
if not is_excluded:
matching_files.append(file_path)
if not check_all:
# intersect with module_diff_files
matching_files = sorted([file for file in matching_files if file in module_diff_files])
print(" Checking auto_docstrings in the following files:" + "\n - " + "\n - ".join(matching_files))
return matching_files
def find_files_with_auto_docstring(matching_files, decorator="@auto_docstring"):
"""
From a list of files, return those that contain the @auto_docstring decorator.
"""
auto_docstrings_files = []
for file_path in matching_files:
with open(file_path, "r", encoding="utf-8") as f:
content_base_file = f.read()
if decorator in content_base_file:
lines = content_base_file.split("\n")
line_numbers = [i for i, line in enumerate(lines) if decorator in line]
for line_number in line_numbers:
line_end = line_number
end_patterns = ["class ", " def"]
stop_condition = False
while line_end < len(lines) and not stop_condition:
line_end += 1
stop_condition = any(lines[line_end].startswith(end_pattern) for end_pattern in end_patterns)
candidate_patterns = ["class ", " def"]
candidate = any(
lines[line_end].startswith(candidate_pattern) for candidate_pattern in candidate_patterns
)
if stop_condition and candidate:
auto_docstrings_files.append(file_path)
break
return auto_docstrings_files
def get_auto_docstring_candidate_lines(lines):
"""
For a file's lines, find the start and end line indices of all @auto_docstring candidates.
Returns two lists: starts and ends.
"""
line_numbers = [i for i, line in enumerate(lines) if "@auto_docstring" in line]
line_starts_candidates = []
line_ends_candidates = []
for line_number in line_numbers:
line_end = line_number
end_patterns = ["class ", " def"]
stop_condition = False
while line_end < len(lines) and not stop_condition:
line_end += 1
stop_condition = any(lines[line_end].startswith(end_pattern) for end_pattern in end_patterns)
candidate_patterns = ["class ", " def"]
candidate = any(lines[line_end].startswith(candidate_pattern) for candidate_pattern in candidate_patterns)
if stop_condition and candidate:
line_ends_candidates.append(line_end)
line_starts_candidates.append(line_number)
return line_starts_candidates, line_ends_candidates
def get_args_in_signature(lines, signature_content):
signature_content = [line.split("#")[0] for line in signature_content]
signature_content = "".join(signature_content)
signature_content = "".join(signature_content.split(")")[:-1])
args_in_signature = re.findall(r"[,(]\s*(\w+)\s*(?=:|=|,|\))", signature_content)
if "self" in args_in_signature:
args_in_signature.remove("self")
return args_in_signature
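# Example (illustrative, not part of the original file): for the signature lines of
#     def forward(self, input_ids: torch.Tensor, attention_mask=None):
# this returns ["input_ids", "attention_mask"] ("self" is removed).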
def get_args_in_dataclass(lines, dataclass_content):
dataclass_content = [line.split("#")[0] for line in dataclass_content]
dataclass_content = "\n".join(dataclass_content)
args_in_dataclass = re.findall(r"^ (\w+)(?:\s*:|\s*=|\s*$)", dataclass_content, re.MULTILINE)
if "self" in args_in_dataclass:
args_in_dataclass.remove("self")
return args_in_dataclass
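# Example (illustrative, not part of the original file): for a ModelOutput body such as
#     loss: Optional[torch.FloatTensor] = None
#     logits: torch.FloatTensor = None
# this returns ["loss", "logits"].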
def generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent=" ",
output_docstring_indent=8,
custom_args_dict={},
source_args_doc=[ModelArgs, ImageProcessorArgs],
):
"""
Generalized docstring generator for a function or class signature.
Args:
lines: List of lines from the file.
args_in_signature: Names of the arguments extracted from the signature (or dataclass body).
sig_end_line: Line index where the signature ends.
docstring_start_line: Line index where the docstring starts (or None if not present).
arg_indent: Indentation for missing argument doc entries.
custom_args_dict: Custom argument docs that should not be reported as missing.
Returns:
new_docstring, sig_end_line, last docstring line index, the missing args, the args whose doc
still contains `<fill_docstring>`, and the args removed because they duplicate the source args doc.
"""
# Extract and clean signature
missing_docstring_args = []
docstring_args_to_remove = []
fill_docstring_args = []
# Parse docstring if present
args_docstring_dict = {}
remaining_docstring = ""
if docstring_start_line is not None:
docstring_end_line = _find_docstring_end_line(lines, docstring_start_line)
docstring_content = lines[docstring_start_line : docstring_end_line + 1]
parsed_docstring, remaining_docstring = parse_docstring("\n".join(docstring_content))
args_docstring_dict.update(parsed_docstring)
else:
docstring_end_line = None
# Remove args that are the same as the ones in the source args doc
for arg in args_docstring_dict:
if arg in get_args_doc_from_source(source_args_doc) and arg not in ALWAYS_OVERRIDE:
source_arg_doc = get_args_doc_from_source(source_args_doc)[arg]
if source_arg_doc["description"].strip("\n ") == args_docstring_dict[arg]["description"].strip("\n "):
if source_arg_doc.get("shape") is not None and args_docstring_dict[arg].get("shape") is not None:
if source_arg_doc.get("shape").strip("\n ") == args_docstring_dict[arg].get("shape").strip("\n "):
docstring_args_to_remove.append(arg)
elif (
source_arg_doc.get("additional_info") is not None
and args_docstring_dict[arg].get("additional_info") is not None
):
if source_arg_doc.get("additional_info").strip("\n ") == args_docstring_dict[arg].get(
"additional_info"
).strip("\n "):
docstring_args_to_remove.append(arg)
else:
docstring_args_to_remove.append(arg)
args_docstring_dict = {
arg: args_docstring_dict[arg] for arg in args_docstring_dict if arg not in docstring_args_to_remove
}
# Fill missing args
for arg in args_in_signature:
if (
arg not in args_docstring_dict
and arg not in get_args_doc_from_source(source_args_doc)
and arg not in custom_args_dict
):
missing_docstring_args.append(arg)
args_docstring_dict[arg] = {
"type": "<fill_type>",
"optional": False,
"shape": None,
"description": "\n <fill_docstring>",
"default": None,
"additional_info": None,
}
# Handle docstring of inherited args (for dataclasses)
ordered_args_docstring_dict = OrderedDict(
(arg, args_docstring_dict[arg]) for arg in args_docstring_dict if arg not in args_in_signature
)
# Add args in the order of the signature
ordered_args_docstring_dict.update(
(arg, args_docstring_dict[arg]) for arg in args_in_signature if arg in args_docstring_dict
)
# Build new docstring
new_docstring = ""
if len(ordered_args_docstring_dict) > 0 or remaining_docstring:
new_docstring += 'r"""\n'
for arg in ordered_args_docstring_dict:
additional_info = ordered_args_docstring_dict[arg]["additional_info"] or ""
custom_arg_description = ordered_args_docstring_dict[arg]["description"]
if "<fill_docstring>" in custom_arg_description and arg not in missing_docstring_args:
fill_docstring_args.append(arg)
if custom_arg_description.endswith('"""'):
custom_arg_description = "\n".join(custom_arg_description.split("\n")[:-1])
new_docstring += (
f"{arg} ({ordered_args_docstring_dict[arg]['type']}{additional_info}):{custom_arg_description}\n"
)
close_docstring = True
if remaining_docstring:
if remaining_docstring.endswith('"""'):
close_docstring = False
end_docstring = "\n" if close_docstring else ""
new_docstring += f"{set_min_indent(remaining_docstring, 0)}{end_docstring}"
if close_docstring:
new_docstring += '"""'
new_docstring = set_min_indent(new_docstring, output_docstring_indent)
return (
new_docstring,
sig_end_line,
docstring_end_line if docstring_end_line is not None else sig_end_line - 1,
missing_docstring_args,
fill_docstring_args,
        docstring_args_to_remove,
)
def generate_new_docstring_for_function(lines, current_line_end, custom_args_dict):
"""
Wrapper for function docstring generation using the generalized helper.
"""
sig_end_line = _find_sig_line(lines, current_line_end)
signature_content = lines[current_line_end:sig_end_line]
args_in_signature = get_args_in_signature(lines, signature_content)
docstring_start_line = sig_end_line if '"""' in lines[sig_end_line] else None
return generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent=" ",
custom_args_dict=custom_args_dict,
)
def generate_new_docstring_for_class(lines, current_line_end, custom_args_dict):
"""
Wrapper for class docstring generation (via __init__) using the generalized helper.
Returns the new docstring and relevant signature/docstring indices.
"""
sig_start_line = current_line_end
found_init_method = False
found_model_output = False
while sig_start_line < len(lines) - 1 and not found_init_method:
sig_start_line += 1
if " def __init__" in lines[sig_start_line]:
found_init_method = True
elif lines[sig_start_line].startswith("class ") or lines[sig_start_line].startswith("def "):
break
if not found_init_method:
if "ModelOutput" in lines[current_line_end]:
found_model_output = True
sig_start_line = current_line_end
else:
return "", None, None, [], [], []
if found_init_method:
sig_end_line = _find_sig_line(lines, sig_start_line)
signature_content = lines[sig_start_line:sig_end_line]
args_in_signature = get_args_in_signature(lines, signature_content)
else:
# we have a ModelOutput class, the class attributes are the args
sig_end_line = sig_start_line + 1
docstring_end = _find_docstring_end_line(lines, sig_end_line)
model_output_class_start = docstring_end + 1 if docstring_end is not None else sig_end_line - 1
model_output_class_end = model_output_class_start
while model_output_class_end < len(lines) and (
lines[model_output_class_end].startswith(" ") or lines[model_output_class_end] == ""
):
model_output_class_end += 1
dataclass_content = lines[model_output_class_start : model_output_class_end - 1]
args_in_signature = get_args_in_dataclass(lines, dataclass_content)
docstring_start_line = sig_end_line if '"""' in lines[sig_end_line] else None
return generate_new_docstring_for_signature(
lines,
args_in_signature,
sig_end_line,
docstring_start_line,
arg_indent="",
custom_args_dict=custom_args_dict,
output_docstring_indent=4 if found_model_output else 8,
source_args_doc=[ModelArgs, ImageProcessorArgs] if not found_model_output else [ModelOutputArgs],
)
def find_custom_args_with_details(file_content: str, custom_args_var_name: str) -> Optional[str]:
    """
    Find the given custom args variable in the file content and return its content, or None if it is not found.
Args:
file_content: The string content of the Python file.
custom_args_var_name: The name of the custom args variable.
"""
# Escape the variable_name to handle any special regex characters it might contain
escaped_variable_name = re.escape(custom_args_var_name)
# Construct the regex pattern dynamically with the specific variable name
# This regex looks for:
# ^\s* : Start of a line with optional leading whitespace.
# ({escaped_variable_name}) : Capture the exact variable name.
# \s*=\s* : An equals sign, surrounded by optional whitespace.
# (r?\"\"\") : Capture the opening triple quotes (raw or normal string).
# (.*?) : Capture the content (non-greedy).
# (\"\"\") : Match the closing triple quotes.
regex_pattern = rf"^\s*({escaped_variable_name})\s*=\s*(r?\"\"\")(.*?)(\"\"\")"
flags = re.MULTILINE | re.DOTALL
# Use re.search to find the first match
match = re.search(regex_pattern, file_content, flags)
if match:
# match.group(1) will be the variable_name itself
# match.group(3) will be the content inside the triple quotes
content = match.group(3).strip()
return content
return None
def update_file_with_new_docstrings(
candidate_file, lines, line_starts_candidates, line_ends_candidates, overwrite=False
):
"""
For a given file, update the docstrings for all @auto_docstring candidates and write the new content.
"""
content_base_file_new_lines = lines[: line_ends_candidates[0]]
current_line_start = line_starts_candidates[0]
current_line_end = line_ends_candidates[0]
index = 1
missing_docstring_args_warnings = []
fill_docstring_args_warnings = []
    docstring_args_to_remove_warnings = []
while index <= len(line_starts_candidates):
custom_args_dict = {}
auto_docstring_signature_content = "".join(lines[current_line_start:current_line_end])
match = re.findall(r"custom_args=(\w+)", auto_docstring_signature_content)
if match:
custom_args_var_name = match[0]
custom_args_var_content = find_custom_args_with_details("\n".join(lines), custom_args_var_name)
if custom_args_var_content:
custom_args_dict, _ = parse_docstring(custom_args_var_content)
new_docstring = ""
modify_class_docstring = False
# Function
if " def" in lines[current_line_end]:
(
new_docstring,
sig_line_end,
docstring_end,
missing_docstring_args,
fill_docstring_args,
                docstring_args_to_remove,
) = generate_new_docstring_for_function(lines, current_line_end, custom_args_dict)
# Class
elif "class " in lines[current_line_end]:
(
new_docstring,
class_sig_line_end,
class_docstring_end_line,
missing_docstring_args,
fill_docstring_args,
                docstring_args_to_remove,
) = generate_new_docstring_for_class(lines, current_line_end, custom_args_dict)
modify_class_docstring = class_sig_line_end is not None
# Add warnings if needed
if missing_docstring_args:
for arg in missing_docstring_args:
missing_docstring_args_warnings.append(f" - {arg} line {current_line_end}")
if fill_docstring_args:
for arg in fill_docstring_args:
fill_docstring_args_warnings.append(f" - {arg} line {current_line_end}")
        if docstring_args_to_remove:
            for arg in docstring_args_to_remove:
                docstring_args_to_remove_warnings.append(f" - {arg} line {current_line_end}")
# Write new lines
if index >= len(line_ends_candidates) or line_ends_candidates[index] > current_line_end:
if " def" in lines[current_line_end]:
content_base_file_new_lines += lines[current_line_end:sig_line_end]
if new_docstring != "":
content_base_file_new_lines += new_docstring.split("\n")
if index < len(line_ends_candidates):
content_base_file_new_lines += lines[docstring_end + 1 : line_ends_candidates[index]]
else:
content_base_file_new_lines += lines[docstring_end + 1 :]
elif modify_class_docstring:
content_base_file_new_lines += lines[current_line_end:class_sig_line_end]
if new_docstring != "":
content_base_file_new_lines += new_docstring.split("\n")
if index < len(line_ends_candidates):
content_base_file_new_lines += lines[class_docstring_end_line + 1 : line_ends_candidates[index]]
else:
content_base_file_new_lines += lines[class_docstring_end_line + 1 :]
elif index < len(line_ends_candidates):
content_base_file_new_lines += lines[current_line_end : line_ends_candidates[index]]
else:
content_base_file_new_lines += lines[current_line_end:]
if index < len(line_ends_candidates):
current_line_end = line_ends_candidates[index]
current_line_start = line_starts_candidates[index]
index += 1
content_base_file_new = "\n".join(content_base_file_new_lines)
if overwrite:
with open(candidate_file, "w", encoding="utf-8") as f:
f.write(content_base_file_new)
return (
missing_docstring_args_warnings,
fill_docstring_args_warnings,
        docstring_args_to_remove_warnings,
)
# TODO (Yoni): The functions in check_auto_docstrings rely on direct code parsing, which is prone to
# failure on edge cases and not robust to code changes. While this approach is significantly faster
# than using inspect (like in check_docstrings) and allows parsing any object including non-public
# ones, it may need to be refactored in the future to use a more robust parsing method. Note that
# we still need auto_docstring for some non-public objects since their docstrings are included in the
# docs of public objects (e.g. ModelOutput classes).
def check_auto_docstrings(overwrite: bool = False, check_all: bool = False):
"""
    Check docstrings of all public objects that are decorated with `@auto_docstring`.
This function orchestrates the process by finding relevant files, scanning for decorators,
generating new docstrings, and updating files as needed.
"""
# 1. Find all model files to check
matching_files = find_matching_model_files(check_all)
if matching_files is None:
return
# 2. Find files that contain the @auto_docstring decorator
auto_docstrings_files = find_files_with_auto_docstring(matching_files)
# 3. For each file, update docstrings for all candidates
for candidate_file in auto_docstrings_files:
with open(candidate_file, "r", encoding="utf-8") as f:
lines = f.read().split("\n")
line_starts_candidates, line_ends_candidates = get_auto_docstring_candidate_lines(lines)
        missing_docstring_args_warnings, fill_docstring_args_warnings, docstring_args_to_remove_warnings = (
update_file_with_new_docstrings(
candidate_file, lines, line_starts_candidates, line_ends_candidates, overwrite=overwrite
)
)
if missing_docstring_args_warnings:
if not overwrite:
print(
"Some docstrings are missing. Run `make fix-copies` or `python utils/check_docstrings.py --fix_and_overwrite` to generate the docstring templates where needed."
)
print(f"🚨 Missing docstring for the following arguments in {candidate_file}:")
for warning in missing_docstring_args_warnings:
print(warning)
        if docstring_args_to_remove_warnings:
if not overwrite:
print(
"Some docstrings are redundant with the ones in `auto_docstring.py` and will be removed. Run `make fix-copies` or `python utils/check_docstrings.py --fix_and_overwrite` to remove the redundant docstrings."
)
print(f"🚨 Redundant docstring for the following arguments in {candidate_file}:")
            for warning in docstring_args_to_remove_warnings:
print(warning)
if fill_docstring_args_warnings:
print(f"🚨 Docstring needs to be filled for the following arguments in {candidate_file}:")
for warning in fill_docstring_args_warnings:
print(warning)
def check_docstrings(overwrite: bool = False, check_all: bool = False):
"""
Check docstrings of all public objects that are callables and are documented. By default, only checks the diff.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to fix inconsistencies or not.
check_all (`bool`, *optional*, defaults to `False`):
Whether to check all files.
"""
module_diff_files = None
if not check_all:
module_diff_files = set()
repo = Repo(PATH_TO_REPO)
# Diff from index to unstaged files
for modified_file_diff in repo.index.diff(None):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(modified_file_diff.a_path)
# Diff from index to `main`
for modified_file_diff in repo.index.diff(repo.refs.main.commit):
if modified_file_diff.a_path.startswith("src/transformers"):
module_diff_files.add(modified_file_diff.a_path)
# quick escape route: if there are no module files in the diff, skip this check
if len(module_diff_files) == 0:
return
print(" Checking docstrings in the following files:" + "\n - " + "\n - ".join(module_diff_files))
failures = []
hard_failures = []
to_clean = []
for name in dir(transformers):
# Skip objects that are private or not documented.
if name.startswith("_") or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE:
continue
obj = getattr(transformers, name)
if not callable(obj) or not isinstance(obj, type) or getattr(obj, "__doc__", None) is None:
continue
# If we are checking against the diff, we skip objects that are not part of the diff.
if module_diff_files is not None:
object_file = find_source_file(getattr(transformers, name))
object_file_relative_path = "src/" + str(object_file).split("/src/")[1]
if object_file_relative_path not in module_diff_files:
continue
# Check docstring
try:
result = match_docstring_with_signature(obj)
if result is not None:
old_doc, new_doc = result
else:
old_doc, new_doc = None, None
except Exception as e:
print(e)
hard_failures.append(name)
continue
if old_doc != new_doc:
if overwrite:
fix_docstring(obj, old_doc, new_doc)
else:
failures.append(name)
elif not overwrite and new_doc is not None and ("<fill_type>" in new_doc or "<fill_docstring>" in new_doc):
to_clean.append(name)
# Deal with errors
error_message = ""
if len(hard_failures) > 0:
        error_message += (
            "The argument part of the docstrings of the following objects could not be processed; check that they "
            "are properly formatted."
        )
error_message += "\n" + "\n".join([f"- {name}" for name in hard_failures])
if len(failures) > 0:
        error_message += (
            "The following objects' docstrings do not match their signatures. Run `make fix-copies` to fix this. "
            "In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the "
            "case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in "
            "`utils/check_docstrings.py`."
        )
error_message += "\n" + "\n".join([f"- {name}" for name in failures])
if len(to_clean) > 0:
        error_message += (
            "The following objects' docstrings contain templates you need to fix: search for `<fill_type>` or "
            "`<fill_docstring>`."
        )
error_message += "\n" + "\n".join([f"- {name}" for name in to_clean])
if len(error_message) > 0:
error_message = "There was at least one problem when checking docstrings of public objects.\n" + error_message
raise ValueError(error_message)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
parser.add_argument(
"--check_all", action="store_true", help="Whether to check all files. By default, only checks the diff"
)
args = parser.parse_args()
check_auto_docstrings(overwrite=args.fix_and_overwrite, check_all=args.check_all)
check_docstrings(overwrite=args.fix_and_overwrite, check_all=args.check_all)
| transformers/utils/check_docstrings.py/0 | {
"file_path": "transformers/utils/check_docstrings.py",
"repo_id": "transformers",
"token_count": 25528
} | 628 |
"""Script for downloading all GLUE data.
Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi and extract it manually.
Windows users can run the .msi file directly; Mac and Linux users can use an external tool such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
"""
import argparse
import os
import sys
import urllib.request
import zipfile
TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
TASK2PATH = {
"CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4",
"SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8",
"MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc",
"QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5",
"STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5",
"MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce",
"SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df",
"QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601",
"RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb",
"WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf",
"diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D",
}
MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
def download_and_extract(task, data_dir):
print(f"Downloading and extracting {task}...")
data_file = f"{task}.zip"
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(data_dir)
os.remove(data_file)
print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(data_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
if not os.path.isfile(mrpc_train_file):
raise ValueError(f"Train data not found at {mrpc_train_file}")
if not os.path.isfile(mrpc_test_file):
raise ValueError(f"Test data not found at {mrpc_test_file}")
urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split("\t"))
with (
open(mrpc_train_file, encoding="utf8") as data_fh,
open(os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8") as train_fh,
open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh,
):
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split("\t")
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with (
open(mrpc_test_file, encoding="utf8") as data_fh,
open(os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8") as test_fh,
):
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split("\t")
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
def download_diagnostic(data_dir):
print("Downloading and extracting diagnostic...")
if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
os.mkdir(os.path.join(data_dir, "diagnostic"))
data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
print("\tCompleted!")
return
def get_tasks(task_names):
task_names = task_names.split(",")
if "all" in task_names:
tasks = TASKS
else:
tasks = []
for task_name in task_names:
if task_name not in TASKS:
raise ValueError(f"Task {task_name} not found!")
tasks.append(task_name)
return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", help="directory to save data to", type=str, default="glue_data")
parser.add_argument(
"--tasks", help="tasks to download data for as a comma separated string", type=str, default="all"
)
parser.add_argument(
"--path_to_mrpc",
help="path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt",
type=str,
default="",
)
args = parser.parse_args(arguments)
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if task == "MRPC":
format_mrpc(args.data_dir, args.path_to_mrpc)
elif task == "diagnostic":
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| transformers/utils/download_glue_data.py/0 | {
"file_path": "transformers/utils/download_glue_data.py",
"repo_id": "transformers",
"token_count": 3943
} | 629 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import time
from get_ci_error_statistics import get_jobs
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
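    # Illustrative input, assuming pytest's usual summary line, e.g.
    #   "=== 1 failed, 2 passed in 0:01:30 ==="
    # which yields failed=1, success=2, time_spent="0:01:30".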
expressions = test_results.split(" ")
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions):
if "failed" in expression:
failed += int(expressions[i - 1])
if "passed" in expression:
success += int(expressions[i - 1])
return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
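    # Illustrative input, assuming pytest's failures-short report: a header line such as
    #   "_ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _"
    # followed by the first line of the error, which is recorded for that file.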
failures = {}
file = None
in_error = False
for line in failures_short_lines.split("\n"):
if re.search(r"_ \[doctest\]", line):
in_error = True
file = line.split(" ")[2]
elif in_error and not line.split(" ")[0].isdigit():
failures[file] = line
in_error = False
return failures
class Message:
def __init__(self, title: str, doc_test_results: dict):
self.title = title
self.n_success = sum(job_result["n_success"] for job_result in doc_test_results.values())
self.n_failures = sum(job_result["n_failures"] for job_result in doc_test_results.values())
self.n_tests = self.n_success + self.n_failures
# Failures and success of the modeling tests
self.doc_test_results = doc_test_results
@property
def time(self) -> str:
all_results = [*self.doc_test_results.values()]
time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])]
total_secs = 0
for time in time_spent:
time_parts = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts) == 1:
time_parts = [0, 0, time_parts[0]]
hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
def header(self) -> dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures(self) -> dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def failures(self) -> dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def category_failures(self) -> list[dict]:
failure_blocks = []
MAX_ERROR_TEXT = 3000 - len("The following examples had failures:\n\n\n\n") - len("[Truncated]\n")
line_length = 40
category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
def single_category_failures(category, failures):
text = ""
if len(failures) == 0:
return ""
text += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
for idx, failure in enumerate(failures):
new_text = text + f"`{failure}`\n"
if len(new_text) > MAX_ERROR_TEXT:
text = text + "[Truncated]\n"
break
text = new_text
return text
for category, failures in category_failures.items():
report = single_category_failures(category, failures)
if len(report) == 0:
continue
block = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
failure_blocks.append(block)
return failure_blocks
@property
def payload(self) -> str:
blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend(self.category_failures)
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(blocks)
@staticmethod
def error_out():
payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(payload)}))
client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text="There was an issue running the tests.",
blocks=payload,
)
def post(self):
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(self.payload)}))
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
blocks=self.payload,
text=text,
)
def get_reply_blocks(self, job_name, job_link, failures, text):
# `text` must be less than 3001 characters in Slack SDK
# keep some room for adding "[Truncated]" when necessary
MAX_ERROR_TEXT = 3000 - len("[Truncated]")
failure_text = ""
for key, value in failures.items():
new_text = failure_text + f"*{key}*\n_{value}_\n\n"
if len(new_text) > MAX_ERROR_TEXT:
# `failure_text` here has length <= 3000
failure_text = failure_text + "[Truncated]"
break
# `failure_text` here has length <= MAX_ERROR_TEXT
failure_text = new_text
title = job_name
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title, "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
]
def post_reply(self):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
for job_name, job_result in sorted_dict:
if len(job_result["failures"]) > 0:
text = f"*Num failures* :{len(job_result['failed'])} \n"
failures = job_result["failures"]
blocks = self.get_reply_blocks(job_name, job_result["job_link"], failures, text=text)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text=f"Results for {job_name}",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)
time.sleep(1)
def retrieve_artifact(name: str):
_artifact = {}
if os.path.exists(name):
files = os.listdir(name)
for file in files:
try:
with open(os.path.join(name, file), encoding="utf-8") as f:
_artifact[file.split(".")[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
return _artifact
def retrieve_available_artifacts():
class Artifact:
def __init__(self, name: str):
self.name = name
self.paths = []
def __str__(self):
return self.name
def add_path(self, path: str):
self.paths.append({"name": self.name, "path": path})
_available_artifacts: dict[str, Artifact] = {}
directories = filter(os.path.isdir, os.listdir())
for directory in directories:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name)
_available_artifacts[artifact_name].add_path(directory)
return _available_artifacts
if __name__ == "__main__":
SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"]
github_actions_jobs = get_jobs(
workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"]
)
artifact_name_to_job_map = {}
for job in github_actions_jobs:
for step in job["steps"]:
if step["name"].startswith("Test suite reports artifacts: "):
artifact_name = step["name"][len("Test suite reports artifacts: ") :]
artifact_name_to_job_map[artifact_name] = job
break
available_artifacts = retrieve_available_artifacts()
doc_test_results = {}
    # Each artifact's first path is the artifact directory name
for artifact_obj in available_artifacts.values():
artifact_path = artifact_obj.paths[0]
if not artifact_path["path"].startswith("doc_tests_gpu_test_reports_"):
continue
# change "_" back to "/" (to show the job name as path)
job_name = artifact_path["path"].replace("doc_tests_gpu_test_reports_", "").replace("_", "/")
        # This dict (for each job) will contain all the information related to each doc test job, in particular:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
job_result = {}
doc_test_results[job_name] = job_result
job = artifact_name_to_job_map[artifact_path["path"]]
job_result["job_link"] = job["html_url"]
job_result["category"] = "Python Examples" if job_name.startswith("src/") else "MD Examples"
artifact = retrieve_artifact(artifact_path["path"])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact["stats"])
job_result["n_failures"] = failed
job_result["n_success"] = success
job_result["time_spent"] = time_spent[1:-1] + ", "
job_result["failed"] = []
job_result["failures"] = {}
all_failures = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
line = line.replace("FAILED ", "")
line = line.split()[0].replace("\n", "")
if "::" in line:
file_path, test = line.split("::")
else:
file_path, test = line, line
job_result["failed"].append(test)
failure = all_failures.get(test, "N/A")
job_result["failures"][test] = failure
# Save and to be uploaded as artifact
os.makedirs("doc_test_results", exist_ok=True)
with open("doc_test_results/doc_test_results.json", "w", encoding="UTF-8") as fp:
json.dump(doc_test_results, fp, ensure_ascii=False, indent=4)
message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| transformers/utils/notification_service_doc_tests.py/0 | {
"file_path": "transformers/utils/notification_service_doc_tests.py",
"repo_id": "transformers",
"token_count": 6464
} | 630 |
from transformers import PretrainedConfig
class CustomConfig(PretrainedConfig):
model_type = "custom"
def __init__(self, attribute=1, **kwargs):
self.attribute = attribute
super().__init__(**kwargs)
| transformers/utils/test_module/custom_configuration.py/0 | {
"file_path": "transformers/utils/test_module/custom_configuration.py",
"repo_id": "transformers",
"token_count": 80
} | 631 |
include LICENSE
include CONTRIBUTING.md
include README.md
recursive-exclude * __pycache__
include trl/templates/*.md
include trl/accelerate_configs/*.yaml | trl/MANIFEST.in/0 | {
"file_path": "trl/MANIFEST.in",
"repo_id": "trl",
"token_count": 53
} | 632 |
# Data Utilities
## prepare_multimodal_messages
[[autodoc]] prepare_multimodal_messages
## is_conversational
[[autodoc]] is_conversational
## is_conversational_from_value
[[autodoc]] is_conversational_from_value
## apply_chat_template
[[autodoc]] apply_chat_template
## maybe_apply_chat_template
[[autodoc]] maybe_apply_chat_template
## maybe_convert_to_chatml
[[autodoc]] maybe_convert_to_chatml
## extract_prompt
[[autodoc]] extract_prompt
## maybe_extract_prompt
[[autodoc]] maybe_extract_prompt
## unpair_preference_dataset
[[autodoc]] unpair_preference_dataset
## maybe_unpair_preference_dataset
[[autodoc]] maybe_unpair_preference_dataset
## pack_dataset
[[autodoc]] pack_dataset
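A minimal sketch of typical usage (the signature details are in the reference above; `"wrapped"` is shown as one of the packing strategies):

```python
from datasets import Dataset
from trl import pack_dataset

examples = {
    "input_ids": [[1, 2, 3], [4, 5], [6, 7, 8], [9]],
    "attention_mask": [[1, 1, 1], [1, 1], [1, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples)
# Pack the short rows into sequences of length 4
packed = pack_dataset(dataset, seq_length=4, strategy="wrapped")
```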
## truncate_dataset
[[autodoc]] truncate_dataset
| trl/docs/source/data_utils.md/0 | {
"file_path": "trl/docs/source/data_utils.md",
"repo_id": "trl",
"token_count": 296
} | 633 |
# Liger Kernel Integration
<Tip warning={true}>
Section under construction. Feel free to contribute!
</Tip>
[Liger Kernel](https://github.com/linkedin/Liger-Kernel) is a collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU training throughput by 20% and reduce memory usage by 60%. That way, we can **4x** our context length, as described in the benchmark below. They have implemented Hugging Face compatible `RMSNorm`, `RoPE`, `SwiGLU`, `CrossEntropy`, `FusedLinearCrossEntropy`, with more to come. The kernel works out of the box with [FlashAttention](https://github.com/Dao-AILab/flash-attention), [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html), and [Microsoft DeepSpeed](https://github.com/microsoft/DeepSpeed).
With this memory reduction, you can potentially turn off `cpu_offloading` or gradient checkpointing to further boost the performance.
| Speed Up | Memory Reduction |
|--------------------------|-------------------------|
|  |  |
1. To use Liger-Kernel in [`SFTTrainer`], first install it by:
```bash
pip install liger-kernel
```
2. Once installed, set `use_liger_kernel` in [`SFTConfig`]. No other changes are needed!
```python
training_args = SFTConfig(
use_liger_kernel=True,
...
)
```
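For context, here is a minimal end-to-end sketch (the model and dataset names are illustrative placeholders):

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-lib/Capybara", split="train")
training_args = SFTConfig(output_dir="Qwen2-0.5B-SFT-Liger", use_liger_kernel=True)
trainer = SFTTrainer(
    model="Qwen/Qwen2-0.5B",
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```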
To learn more about Liger-Kernel, visit their [official repository](https://github.com/linkedin/Liger-Kernel/).
| trl/docs/source/liger_kernel_integration.md/0 | {
"file_path": "trl/docs/source/liger_kernel_integration.md",
"repo_id": "trl",
"token_count": 513
} | 634 |
# Reward Functions
This module contains some useful reward functions, primarily intended for use with the [`GRPOTrainer`].
## Format rewards
### think_format_reward
[[autodoc]] rewards.think_format_reward
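A quick sketch of a typical call (completions use the conversational format produced by [`GRPOTrainer`]; the expected return value here is an assumption based on the format check):

```python
from trl.rewards import think_format_reward

completions = [[{"content": "<think>\nSome reasoning.\n</think>\nThe answer."}]]
print(think_format_reward(completions=completions))  # expected: [1.0]
```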
## Other rewards
### get_soft_overlong_punishment
[[autodoc]] rewards.get_soft_overlong_punishment
| trl/docs/source/rewards.md/0 | {
"file_path": "trl/docs/source/rewards.md",
"repo_id": "trl",
"token_count": 93
} | 635 |
# Requires accelerate 1.7.0 or higher
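# Example launch command (the training script path is illustrative):
#   accelerate launch --config_file examples/accelerate_configs/fsdp2.yaml examples/scripts/sft.py ...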
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
enable_cpu_affinity: false
fsdp_config:
fsdp_activation_checkpointing: false
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_cpu_ram_efficient_loading: true
fsdp_offload_params: false
fsdp_reshard_after_forward: true
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_version: 2
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
| trl/examples/accelerate_configs/fsdp2.yaml/0 | {
"file_path": "trl/examples/accelerate_configs/fsdp2.yaml",
"repo_id": "trl",
"token_count": 248
} | 636 |
<jupyter_start><jupyter_text>**Best-of-n sampling as an alternative to RLHF**This notebook compares reward-model scores of prompt based responses from 1. a base model (`gpt2-imdb`)2. `RLHF` tuned model based on this base-model 3. the base-model again from which we sample n responses to each prompt, score them and take the best scored one AKA the `best-of-n sampled` modelImport dependencies<jupyter_code>%pip install transformers trl
import torch
import pandas as pd
from transformers import pipeline, AutoTokenizer
from datasets import load_dataset
from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
device = "cuda" if torch.cuda.is_available() else "cpu"<jupyter_output><empty_output><jupyter_text>Various constants<jupyter_code>ref_model_name = "lvwerra/gpt2-imdb"
model_name = "lvwerra/gpt2-imdb-pos-v2"
reward_model = "lvwerra/distilbert-imdb"
N_BEST_OF = 4<jupyter_output><empty_output><jupyter_text>Models and tokenizers<jupyter_code>model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name)
ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token
# cuda-ize models
model.to(device)
ref_model.to(device)<jupyter_output>/Users/kashif/Github/transformers/src/transformers/tokenization_utils_base.py:1617: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(<jupyter_text>Dataset building<jupyter_code>def build_dataset(
tokenizer,
dataset_name="stanfordnlp/imdb",
input_min_text_length=2,
input_max_text_length=8,
):
# load imdb with datasets
ds = load_dataset(dataset_name, split="train")
ds = ds.rename_columns({"text": "review"})
ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False)
input_size = LengthSampler(input_min_text_length, input_max_text_length)
def tokenize(sample):
sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()]
sample["query"] = tokenizer.decode(sample["input_ids"])
return sample
ds = ds.map(tokenize, batched=False)
ds.set_format(type="torch")
return ds
dataset = build_dataset(tokenizer)
gen_kwargs = {
"min_length": -1,
"top_k": 0.0,
"top_p": 1.0,
"do_sample": True,
"pad_token_id": tokenizer.eos_token_id,
}
sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16}
output_min_length = 4
output_max_length = 16
output_length_sampler = LengthSampler(output_min_length, output_max_length)
#### get a batch from the dataset
bs = 16
output_data = dict()
dataset.set_format("pandas")
df_batch = dataset[:].sample(bs)
output_data["query"] = df_batch["query"].tolist()
query_tensors = df_batch["input_ids"].tolist()
# :: [Resp]
response_tensors_ref, response_tensors = [], []
# :: [[Resp]]
response_tensors_best_of = []<jupyter_output><empty_output><jupyter_text>Generation using various models<jupyter_code>for i in range(bs):
gen_len = output_length_sampler()
query = torch.tensor(query_tensors[i])
output = ref_model.generate(
query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs
).squeeze()
response_tensors_ref.append(tokenizer.decode(output))
output = model.generate(
query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs
).squeeze()
response_tensors.append(tokenizer.decode(output))
# generating copies of the same query for the Best-of-n sampling
queries = query.repeat((N_BEST_OF, 1))
output = ref_model.generate(
queries.to(device), max_new_tokens=gen_len, **gen_kwargs
).squeeze()
response_tensors_best_of.append(tokenizer.batch_decode(output))<jupyter_output>The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.<jupyter_text>Scoring<jupyter_code>scores_ref = [
output[0]["score"] for output in reward_pipe(response_tensors_ref, **sent_kwargs)
]
scores = [output[0]["score"] for output in reward_pipe(response_tensors, **sent_kwargs)]
scores_best_of = []
for i, response in enumerate(response_tensors_best_of):
# base_score = scores_ref[i]
scores_best_of.append(
torch.tensor(
[output[0]["score"] for output in reward_pipe(response, **sent_kwargs)]
)
)
output_data["response (ref)"] = response_tensors_ref
output_data["scores (ref)"] = scores_ref
output_data["response (RLHF)"] = response_tensors
output_data["scores (RLHF)"] = scores
output_data["response (best_of)"] = [
response_tensors_best_of[i][a.argmax().item()] for i, a in enumerate(scores_best_of)
]
output_data["scores (best_of)"] = [a.max().item() for a in scores_best_of]
# store results in a dataframe
df_results = pd.DataFrame(output_data)
df_results<jupyter_output><empty_output> | trl/examples/notebooks/best_of_n.ipynb/0 | {
"file_path": "trl/examples/notebooks/best_of_n.ipynb",
"repo_id": "trl",
"token_count": 1998
} | 637 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# "peft",
# "math-verify",
# "latex2sympy2_extended",
# ]
# ///
"""
pip install math_verify
# For Qwen/Qwen2.5-VL-3B-Instruct
accelerate launch \
--config_file examples/accelerate_configs/deepspeed_zero3.yaml \
examples/scripts/gspo_vlm.py \
--model_name_or_path Qwen/Qwen2.5-VL-3B-Instruct \
--output_dir gspo-Qwen2.5-VL-3B-Instruct \
--learning_rate 1e-5 \
--torch_dtype bfloat16 \
--max_prompt_length 2048 \
--max_completion_length 1024 \
--use_peft \
--lora_target_modules "q_proj", "v_proj" \
--log_completions \
--per_device_train_batch_size 8 \
--num_generations 8 \
--importance_sampling_level sequence \
--epsilon 3e-4 \
--epsilon_high 4e-4 \
--beta 0.0 \
--loss_type grpo \
--gradient_accumulation_steps 2 \
--steps_per_generation 8
"""
import torch
from datasets import load_dataset
from latex2sympy2_extended import NormalizationConfig
from math_verify import LatexExtractionConfig, parse, verify
from trl import (
GRPOConfig,
GRPOTrainer,
ModelConfig,
ScriptArguments,
TrlParser,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
from trl.rewards import think_format_reward
if __name__ == "__main__":
parser = TrlParser((ScriptArguments, GRPOConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_and_config()
################
# Model & Processor
################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
training_args.model_init_kwargs = dict(
revision=model_args.model_revision,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
################
# Dataset
################
dataset = load_dataset("lmms-lab/multimodal-open-r1-8k-verified", split="train")
dataset = dataset.train_test_split(test_size=100, seed=42)
SYSTEM_PROMPT = (
"A conversation between user and assistant. The user asks a question, and the assistant solves it. The "
"assistant first thinks about the reasoning process in the mind and then provides the user with the answer. "
"The reasoning process and answer are enclosed within <think></think> tags, i.e., <think>\nThis is my "
"reasoning.\n</think>\nThis is my answer."
)
def make_conversation(example):
prompt = [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": example["problem"]},
]
return {"prompt": prompt}
dataset = dataset.map(make_conversation)
    # Filter out examples with large images
def filter_big_images(example):
image = example["image"]
return image.size[0] < 512 and image.size[1] < 512
dataset = dataset.filter(filter_big_images)
def convert_to_rgb(example):
image = example["image"]
if image.mode != "RGB":
image = image.convert("RGB")
example["image"] = image
return example
dataset = dataset.map(convert_to_rgb)
train_dataset = dataset["train"]
eval_dataset = dataset["test"] if training_args.eval_strategy != "no" else None
################
# Reward Function for Training
################
def accuracy_reward(completions, solution: list[str], **kwargs):
"""Reward function that checks if the completion matches the ground truth.
- If both gold and prediction are parseable → use math verification.
- If not parseable → compare as normalized text.
"""
rewards = []
contents = [completion[0]["content"] for completion in completions]
for content, sol in zip(contents, solution):
try:
gold_parsed = parse(sol, extraction_mode="first_match")
except Exception:
gold_parsed = []
if len(gold_parsed) != 0:
# Try parsing predicted answer too
try:
answer_parsed = parse(
content,
extraction_config=[
LatexExtractionConfig(
normalization_config=NormalizationConfig(
nits=False,
malformed_operators=False,
basic_latex=True,
boxed="all",
units=True,
),
boxed_match_priority=0,
try_extract_without_anchor=False,
)
],
extraction_mode="first_match",
)
reward = float(verify(gold_parsed, answer_parsed))
except Exception as e:
print(f"verify failed: {e}, answer: {content}, gold: {sol}")
reward = None
else:
# fallback to text match
reward = float(content.strip().lower() == sol.strip().lower())
rewards.append(reward)
return rewards
################
# Training
################
trainer = GRPOTrainer(
model=model_args.model_name_or_path,
args=training_args,
reward_funcs=[think_format_reward, accuracy_reward],
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=get_peft_config(model_args),
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
| trl/examples/scripts/gspo_vlm.py/0 | {
"file_path": "trl/examples/scripts/gspo_vlm.py",
"repo_id": "trl",
"token_count": 2986
} | 638 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# "Pillow>=9.4.0",
# ]
# ///
"""
Train Gemma 3 on the HuggingFaceH4/llava-instruct-mix-vsft dataset (single-image).
accelerate launch \
--config_file examples/accelerate_configs/deepspeed_zero3.yaml \
examples/scripts/sft_vlm_gemma3.py \
--dataset_name HuggingFaceH4/llava-instruct-mix-vsft \
--model_name_or_path google/gemma-3-4b-it \
--per_device_train_batch_size 1 \
--output_dir Gemma-3-4B-SFT-MMIU \
--torch_dtype bfloat16 \
--use_peft \
--lora_target_modules all-linear \
--attn_implementation eager
Train Gemma 3 on the FanqingM/MMIU-Benchmark dataset (multi-image).
accelerate launch \
--config_file examples/accelerate_configs/deepspeed_zero3.yaml \
examples/scripts/sft_vlm_gemma3.py \
--dataset_name FanqingM/MMIU-Benchmark \
--dataset_train_split test \
--model_name_or_path google/gemma-3-4b-it \
--per_device_train_batch_size 1 \
--output_dir Gemma-3-4B-SFT-MMIU \
--torch_dtype bfloat16 \
--use_peft \
    --lora_target_modules all-linear \
    --attn_implementation eager
"""
import io
import os
import zipfile
from typing import Any
import torch
from datasets import DatasetDict, load_dataset
from huggingface_hub import hf_hub_download, list_repo_files
from PIL import Image
from transformers import AutoModelForImageTextToText
from trl import (
ModelConfig,
ScriptArguments,
SFTConfig,
SFTTrainer,
TrlParser,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
# For multi-image example
def process_vision_info(messages: list[dict]) -> list[Image.Image]:
image_inputs = []
for msg in messages:
content = msg.get("content", [])
if not isinstance(content, list):
content = [content]
for element in content:
if isinstance(element, dict) and ("image" in element or element.get("type") == "image"):
if "image" in element:
image = element["image"]
else:
image = element
if image is not None:
image = Image.open(io.BytesIO(image["bytes"]))
image_inputs.append(image.convert("RGB"))
return image_inputs
def format_data(samples: dict[str, Any]) -> dict[str, list]:
formatted_samples = {"messages": []}
for cont in range(len(samples["question"])):
images = []
for img_path in samples["input_image_path"][cont]:
try:
with open(img_path, "rb") as f:
img_bytes = f.read()
image = Image.open(io.BytesIO(img_bytes)).convert("RGB")
images.append({"type": "image", "image": image})
except Exception as e:
print(f"Error processing image {img_path}: {e}")
continue
formatted_samples["messages"].append(
[
{"role": "system", "content": [{"type": "text", "text": samples["context"][cont]}]},
{"role": "user", "content": images + [{"type": "text", "text": samples["question"][cont]}]},
{"role": "assistant", "content": [{"type": "text", "text": samples["output"][cont]}]},
]
)
return formatted_samples
# For multi-image example
def prepare_dataset(dataset: DatasetDict, dataset_name: str) -> DatasetDict:
all_files = list_repo_files(dataset_name, repo_type="dataset")
zip_files = [f for f in all_files if f.endswith(".zip")]
for zip_filename in zip_files:
zip_path = hf_hub_download(repo_id=dataset_name, filename=zip_filename, repo_type="dataset")
extract_folder = zip_filename.replace(".zip", "")
os.makedirs(extract_folder, exist_ok=True)
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(extract_folder)
dataset = dataset.map(format_data, batched=True, batch_size=4, num_proc=16)
return dataset
def main():
parser = TrlParser((ScriptArguments, SFTConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_and_config()
training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False)
training_args.max_length = None
################
# Model, Tokenizer & Processor
################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
model = AutoModelForImageTextToText.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs
)
################
# Dataset
################
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
if script_args.dataset_name == "FanqingM/MMIU-Benchmark":
dataset = prepare_dataset(dataset, script_args.dataset_name)
################
# Training
################
trainer = SFTTrainer(
model=model,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
peft_config=get_peft_config(model_args),
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
if __name__ == "__main__":
main()
| trl/examples/scripts/sft_vlm_gemma3.py/0 | {
"file_path": "trl/examples/scripts/sft_vlm_gemma3.py",
"repo_id": "trl",
"token_count": 2727
} | 639 |
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %} | trl/tests/data/template.jinja/0 | {
"file_path": "trl/tests/data/template.jinja",
"repo_id": "trl",
"token_count": 2029
} | 640 |
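As a quick illustration, a template like the one above can be rendered standalone with jinja2; this is only a sketch (in TRL such templates are normally exercised through `tokenizer.apply_chat_template`), and the file path is simply the `file_path` recorded in this row's metadata:

```python
# Minimal sketch: render the chat template above directly with jinja2.
# Assumes jinja2 is installed; the path is taken from this row's metadata.
from jinja2 import Environment

with open("trl/tests/data/template.jinja") as f:
    template = Environment().from_string(f.read())

text = template.render(
    messages=[
        {"role": "user", "content": "What color is the sky?"},
        {"role": "assistant", "content": "It is blue."},
    ],
    tools=None,               # with no tools, the system block is skipped
    add_generation_prompt=False,
)
print(text)
```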
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import textwrap
import unittest
from time import strftime
from datasets import Dataset, DatasetDict
from parameterized import parameterized
from transformers import AutoProcessor, AutoTokenizer
from trl.data_utils import (
apply_chat_template,
extract_prompt,
is_conversational,
is_conversational_from_value,
maybe_apply_chat_template,
maybe_convert_to_chatml,
maybe_extract_prompt,
maybe_unpair_preference_dataset,
pack_dataset,
prepare_multimodal_messages,
truncate_dataset,
unpair_preference_dataset,
)
from .testing_utils import TrlTestCase
class PrepareMultimodalMessagesTester(unittest.TestCase):
def test_basic_user_assistant_conversation(self):
"""Test basic conversation with user and assistant messages."""
messages = [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
]
prepare_multimodal_messages(messages, num_images=1)
expected = [
{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
]
self.assertEqual(messages, expected)
def test_first_user_message_gets_image(self):
"""Test that only the first user message gets an image placeholder."""
messages = [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
{"role": "user", "content": "How about the grass?"},
]
prepare_multimodal_messages(messages, num_images=1)
expected = [
{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
{"role": "user", "content": [{"type": "text", "text": "How about the grass?"}]},
]
self.assertEqual(messages, expected)
def test_multiple_images(self):
"""Test that multiple images are added to the first user message."""
messages = [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
]
prepare_multimodal_messages(messages, num_images=3)
expected = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "image"},
{"type": "image"},
{"type": "text", "text": "What color is the sky?"},
],
},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
]
self.assertEqual(messages, expected)
def test_system_message_transformation(self):
"""Test that system messages are properly transformed."""
messages = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "What color is the sky?"},
]
prepare_multimodal_messages(messages, num_images=1)
expected = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant"}]},
{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]},
]
self.assertEqual(messages, expected)
def test_already_prepared_messages_unchanged(self):
"""Test that messages with list content are not modified."""
messages = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant"}]},
{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
]
original = copy.deepcopy(messages)
prepare_multimodal_messages(messages, num_images=1)
self.assertEqual(messages, original)
def test_mixed_prepared_and_unprepared_messages(self):
"""Test handling of mixed prepared and unprepared messages."""
messages = [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
{"role": "user", "content": "What about the grass?"},
]
prepare_multimodal_messages(messages, num_images=1)
expected = [
{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What color is the sky?"}]},
{"role": "assistant", "content": [{"type": "text", "text": "It is blue."}]},
{"role": "user", "content": [{"type": "text", "text": "What about the grass?"}]},
]
self.assertEqual(messages, expected)
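# Taken together, the tests above pin down the contract of
# prepare_multimodal_messages: plain-string contents are wrapped in place into
# [{"type": ...}] content lists, and all image placeholders are prepended to
# the first user message only.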
class IsConversationalTester(TrlTestCase):
conversational_examples = [
{ # Language modeling
"messages": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
},
{ # Prompt-only
"prompt": [{"role": "user", "content": "What color is the sky?"}],
},
{ # Prompt-completion
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"completion": [{"role": "assistant", "content": "It is blue."}],
},
{ # Preference
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"chosen": [{"role": "assistant", "content": "It is blue."}],
"rejected": [{"role": "assistant", "content": "It is green."}],
},
{ # Preference with implicit prompt
"chosen": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
"rejected": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is green."},
],
},
{ # Unpaired preference
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"completion": [{"role": "assistant", "content": "It is blue."}],
"label": True,
},
{ # Language modeling with harmony
"messages": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
},
{ # Prompt-only with harmony
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
},
{ # Prompt-completion with harmony
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"completion": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
},
{ # Preference with harmony
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"chosen": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"rejected": [
{"role": "assistant", "thinking": "The user asks the color of the tree...", "content": "It is green."},
],
},
{ # Preference with implicit prompt and harmony
"chosen": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"rejected": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the tree...", "content": "It is green."},
],
},
{ # Unpaired preference with harmony
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"completion": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"label": True,
},
]
non_conversational_examples = [
{"prompt": "The sky is", "completion": " blue."},
{"text": "The sky is blue."},
{"prompt": "The sky is"},
{"prompt": "The sky is", "chosen": " blue.", "rejected": " green."},
{"prompt": "The sky is", "completion": " blue.", "label": True},
]
@parameterized.expand(itertools.product(conversational_examples))
def test_conversational(self, example):
self.assertTrue(is_conversational(example))
@parameterized.expand(itertools.product(non_conversational_examples))
def test_non_conversational(self, example):
self.assertFalse(is_conversational(example))
class IsConversationalFromValueTester(TrlTestCase):
def test_positive_1(self):
example = {
"conversations": [
{"from": "user", "value": "What color is the sky?"},
{"from": "assistant", "value": "It is blue."},
],
}
self.assertTrue(is_conversational_from_value(example))
def test_negative_1(self):
example = {
"messages": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
}
self.assertFalse(is_conversational_from_value(example))
def test_negative_2(self):
example = {"text": "The sky is blue."}
self.assertFalse(is_conversational_from_value(example))
class ApplyChatTemplateTester(TrlTestCase):
tokenizers = [
"trl-internal-testing/tiny-CohereForCausalLM",
"trl-internal-testing/tiny-DbrxForCausalLM",
"trl-internal-testing/tiny-DeepseekV3ForCausalLM",
"trl-internal-testing/tiny-DeepseekV3ForCausalLM-0528",
"trl-internal-testing/tiny-FalconMambaForCausalLM",
"trl-internal-testing/tiny-Gemma2ForCausalLM",
"trl-internal-testing/tiny-GemmaForCausalLM",
"trl-internal-testing/tiny-GptOssForCausalLM",
"trl-internal-testing/tiny-LlamaForCausalLM-3.1",
"trl-internal-testing/tiny-LlamaForCausalLM-3.2",
"trl-internal-testing/tiny-LlamaForCausalLM-3",
"trl-internal-testing/tiny-MistralForCausalLM-0.1",
"trl-internal-testing/tiny-MistralForCausalLM-0.2",
"trl-internal-testing/tiny-Phi3ForCausalLM",
"trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",
"trl-internal-testing/tiny-Qwen3ForCausalLM",
]
conversational_examples = [
{ # Language modeling
"messages": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
},
{ # Prompt-only
"prompt": [{"role": "user", "content": "What color is the sky?"}],
},
{ # Prompt-completion
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"completion": [{"role": "assistant", "content": "It is blue."}],
},
{ # Preference
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"chosen": [{"role": "assistant", "content": "It is blue."}],
"rejected": [{"role": "assistant", "content": "It is green."}],
},
{ # Preference with implicit prompt
"chosen": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
"rejected": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is green."},
],
},
{ # Unpaired preference
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"completion": [{"role": "assistant", "content": "It is blue."}],
"label": True,
},
]
non_conversational_examples = [
{"text": "The sky is blue."}, # Language modeling
{"prompt": "The sky is"}, # Prompt-only
{"prompt": "The sky is", "completion": " blue."}, # Prompt-completion
{"prompt": "The sky is", "chosen": " blue.", "rejected": " green."}, # Preference
{"chosen": "The sky is blue.", "rejected": "The sky is green."}, # Preference with implicit prompt
{"prompt": "The sky is", "completion": " blue.", "label": True}, # Unpaired preference
]
@parameterized.expand(itertools.product(tokenizers, conversational_examples))
def test_apply_chat_template(self, tokenizer_id, example):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
result = apply_chat_template(example, tokenizer)
# Checking if the result is a dictionary
self.assertIsInstance(result, dict)
# The chat template should be applied to the following keys
for key in ["prompt", "chosen", "rejected", "completion"]:
if key in example:
self.assertIn(key, result)
self.assertIsInstance(result[key], str)
# Exception: for "messages", the key becomes "text" once the chat template is applied
if "messages" in example:
self.assertIn("text", result)
self.assertIsInstance(result["text"], str)
# The label should be kept
if "label" in example:
self.assertIn("label", result)
self.assertIsInstance(result["label"], bool)
self.assertEqual(result["label"], example["label"])
# Test with both conversational and non-conversational examples
@parameterized.expand(itertools.product(tokenizers, conversational_examples + non_conversational_examples))
def test_maybe_apply_chat_template(self, tokenizer_id, example):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
result = maybe_apply_chat_template(example, tokenizer)
# Checking if the result is a dictionary
self.assertIsInstance(result, dict)
# The chat template should be applied to the following keys
for key in ["prompt", "chosen", "rejected", "completion"]:
if key in example:
self.assertIn(key, result)
self.assertIsInstance(result[key], str)
# Exception: for "messages", the key becomes "text" once the chat template is applied
if "messages" in example:
self.assertIn("text", result)
self.assertIsInstance(result["text"], str)
# The label should be kept
if "label" in example:
self.assertIn("label", result)
self.assertIsInstance(result["label"], bool)
self.assertEqual(result["label"], example["label"])
def test_apply_chat_template_with_tools(self):
tokenizer = AutoProcessor.from_pretrained("trl-internal-testing/tiny-LlamaForCausalLM-3.2")
# Define dummy test tools
def get_current_temperature(location: str):
"""
Gets the temperature at a given location.
Args:
location: The location to get the temperature for
"""
return 22.0
# Define test case
test_case = {
"prompt": [
{"content": "Whats the temperature in London?", "role": "user"},
]
}
# Test with tools
result_with_tools = apply_chat_template(test_case, tokenizer, tools=[get_current_temperature])
# Verify tools are included in the output
self.assertIn("get_current_temperature", result_with_tools["prompt"])
# Test without tools
result_without_tools = apply_chat_template(test_case, tokenizer, tools=None)
# Verify tools are not included in the output
self.assertNotIn("get_current_temperature", result_without_tools["prompt"])
class ApplyChatTemplateHarmonyTester(TrlTestCase):
def test_language_modeling(self):
messages = {
"messages": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant<|channel|>analysis<|message|>The user asks the color of the sky...<|end|><|start|>assistant<|channel|>final<|message|>It is blue.<|return|>""")
self.assertEqual(output["text"], expected)
def test_prompt_only(self):
messages = {
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant""")
self.assertEqual(output["prompt"], expected)
def test_prompt_completion(self):
messages = {
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"completion": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected_prompt = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant""")
expected_completion = "<|channel|>analysis<|message|>The user asks the color of the sky...<|end|><|start|>assistant<|channel|>final<|message|>It is blue.<|return|>"
self.assertEqual(output["prompt"], expected_prompt)
self.assertEqual(output["completion"], expected_completion)
def test_preference(self):
messages = {
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"chosen": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"rejected": [
{"role": "assistant", "thinking": "The user asks the color of the tree...", "content": "It is green."},
],
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected_prompt = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant""")
expected_chosen = "<|channel|>analysis<|message|>The user asks the color of the sky...<|end|><|start|>assistant<|channel|>final<|message|>It is blue.<|return|>"
expected_rejected = "<|channel|>analysis<|message|>The user asks the color of the tree...<|end|><|start|>assistant<|channel|>final<|message|>It is green.<|return|>"
self.assertEqual(output["prompt"], expected_prompt)
self.assertEqual(output["chosen"], expected_chosen)
self.assertEqual(output["rejected"], expected_rejected)
def test_preference_with_implicit_prompt(self):
messages = {
"chosen": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"rejected": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "thinking": "The user asks the color of the tree...", "content": "It is green."},
],
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected_chosen = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant<|channel|>analysis<|message|>The user asks the color of the sky...<|end|><|start|>assistant<|channel|>final<|message|>It is blue.<|return|>""")
# docstyle-ignore
expected_rejected = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant<|channel|>analysis<|message|>The user asks the color of the tree...<|end|><|start|>assistant<|channel|>final<|message|>It is green.<|return|>""")
self.assertEqual(output["chosen"], expected_chosen)
self.assertEqual(output["rejected"], expected_rejected)
def test_unpaired_preference(self):
messages = {
"prompt": [
{"role": "system", "content": "Respond in a friendly manner."},
{"role": "user", "content": "What color is the sky?"},
],
"completion": [
{"role": "assistant", "thinking": "The user asks the color of the sky...", "content": "It is blue."},
],
"label": True,
}
output = apply_chat_template(
messages,
tokenizer=AutoTokenizer.from_pretrained("trl-internal-testing/tiny-GptOssForCausalLM"),
reasoning_effort="low",
model_identity="You are HuggingGPT.",
)
# docstyle-ignore
expected_prompt = textwrap.dedent(f"""\
<|start|>system<|message|>You are HuggingGPT.
Knowledge cutoff: 2024-06
Current date: {strftime("%Y-%m-%d")}
Reasoning: low
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>developer<|message|># Instructions
Respond in a friendly manner.<|end|><|start|>user<|message|>What color is the sky?<|end|><|start|>assistant""")
expected_completion = "<|channel|>analysis<|message|>The user asks the color of the sky...<|end|><|start|>assistant<|channel|>final<|message|>It is blue.<|return|>"
self.assertEqual(output["prompt"], expected_prompt)
self.assertEqual(output["completion"], expected_completion)
self.assertTrue(output["label"])
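# The expected strings above follow the "harmony" format used by the GPT-OSS
# models: an assistant turn is split into an analysis channel (the "thinking"
# field) and a final channel (the "content" field), which is why each expected
# completion contains two <|channel|> segments.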
class UnpairPreferenceDatasetTester(TrlTestCase):
paired_dataset = Dataset.from_dict(
{
"prompt": ["The sky is", "The sun is"],
"chosen": [" blue.", " in the sky."],
"rejected": [" green.", " in the sea."],
}
)
unpaired_dataset = Dataset.from_dict(
{
"prompt": ["The sky is", "The sun is", "The sky is", "The sun is"],
"completion": [" blue.", " in the sky.", " green.", " in the sea."],
"label": [True, True, False, False],
}
)
def test_unpair_preference_dataset(self):
# Test that a paired dataset is correctly converted to unpaired
unpaired_dataset = unpair_preference_dataset(self.paired_dataset)
self.assertEqual(
unpaired_dataset.to_dict(),
self.unpaired_dataset.to_dict(),
"The paired dataset should be converted to unpaired.",
)
def test_unpair_preference_dataset_dict(self):
# Test that a paired dataset dict is correctly converted to unpaired
paired_dataset_dict = DatasetDict({"abc": self.paired_dataset})
unpaired_dataset_dict = unpair_preference_dataset(paired_dataset_dict)
self.assertEqual(
unpaired_dataset_dict["abc"].to_dict(),
self.unpaired_dataset.to_dict(),
"The paired dataset should be converted to unpaired.",
)
def test_maybe_unpair_preference_dataset(self):
# Test that a paired dataset is correctly converted to unpaired with maybe_unpair_preference_dataset
unpaired_dataset = maybe_unpair_preference_dataset(self.paired_dataset)
self.assertEqual(
unpaired_dataset.to_dict(),
self.unpaired_dataset.to_dict(),
"The paired dataset should be converted to unpaired.",
)
def test_maybe_unpair_preference_dataset_dict(self):
# Test that a paired dataset dict is correctly converted to unpaired with maybe_unpair_preference_dataset
paired_dataset_dict = DatasetDict({"abc": self.paired_dataset})
unpaired_dataset_dict = maybe_unpair_preference_dataset(paired_dataset_dict)
self.assertEqual(
unpaired_dataset_dict["abc"].to_dict(),
self.unpaired_dataset.to_dict(),
"The paired dataset should be converted to unpaired.",
)
def test_maybe_unpair_preference_dataset_already_unpaired(self):
# Test that an already unpaired dataset remains unchanged with maybe_unpair_preference_dataset
unpaired_dataset = maybe_unpair_preference_dataset(self.unpaired_dataset)
self.assertEqual(
unpaired_dataset.to_dict(),
self.unpaired_dataset.to_dict(),
"The unpaired dataset should remain unchanged.",
)
def test_maybe_unpair_preference_dataset_dict_already_unpaired(self):
# Test that an already unpaired dataset dict remains unchanged with maybe_unpair_preference_dataset
unpaired_dataset_dict = maybe_unpair_preference_dataset(DatasetDict({"abc": self.unpaired_dataset}))
self.assertEqual(
unpaired_dataset_dict["abc"].to_dict(),
self.unpaired_dataset.to_dict(),
"The unpaired dataset should remain unchanged.",
)
class ExtractPromptTester(TrlTestCase):
example_implicit_prompt_conversational = {
"chosen": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
],
"rejected": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is green."},
],
}
example_explicit_prompt_conversational = {
"prompt": [
{"role": "user", "content": "What color is the sky?"},
],
"chosen": [
{"role": "assistant", "content": "It is blue."},
],
"rejected": [
{"role": "assistant", "content": "It is green."},
],
}
example_implicit_prompt_standard = {
"chosen": "The sky is blue.",
"rejected": "The sky is green.",
}
example_explicit_prompt_standard = {
"prompt": "The sky is",
"chosen": " blue.",
"rejected": " green.",
}
def test_extract_prompt_conversational(self):
# Test that the prompt is correctly extracted from the dataset
example_extracted_prompt = extract_prompt(self.example_implicit_prompt_conversational)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_conversational,
"The prompt is not correctly extracted from the dataset.",
)
def test_maybe_extract_prompt_conversational(self):
# Test that the prompt is correctly extracted from the dataset with maybe_extract_prompt
example_extracted_prompt = maybe_extract_prompt(self.example_implicit_prompt_conversational)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_conversational,
"The prompt is not correctly extracted from the dataset.",
)
def test_maybe_extract_prompt_conversational_already_explicit(self):
# Test that the prompt remains unchanged with maybe_extract_prompt
example_extracted_prompt = maybe_extract_prompt(self.example_explicit_prompt_conversational)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_conversational,
"The prompt should remain unchanged.",
)
def test_extract_prompt_standard(self):
# Test that the prompt is correctly extracted from the dataset
example_extracted_prompt = extract_prompt(self.example_implicit_prompt_standard)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_standard,
"The prompt is not correctly extracted from the dataset.",
)
def test_maybe_extract_prompt_standard(self):
# Test that the prompt is correctly extracted from the dataset with maybe_extract_prompt
example_extracted_prompt = maybe_extract_prompt(self.example_implicit_prompt_standard)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_standard,
"The prompt is not correctly extracted from the dataset.",
)
def test_maybe_extract_prompt_standard_already_explicit(self):
# Test that the prompt remains unchanged with maybe_extract_prompt
example_extracted_prompt = maybe_extract_prompt(self.example_explicit_prompt_standard)
self.assertEqual(
example_extracted_prompt,
self.example_explicit_prompt_standard,
"The prompt should remain unchanged.",
)
class TestPackDatasetWrapped(TrlTestCase):
def test_with_dataset(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples)
seq_length = 3
expected_output = {
"input_ids": [[1, 2, 3], [4, 5, 6], [7, 8]],
"attention_mask": [[0, 1, 1], [0, 0, 1], [1, 1]],
}
dataset = pack_dataset(dataset, seq_length, strategy="wrapped")
self.assertEqual(dataset.to_dict(), expected_output)
def test_with_iterable_dataset(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples).to_iterable_dataset()
seq_length = 3
expected_output = {
"input_ids": [[1, 2, 3], [4, 5, 6], [7, 8]],
"attention_mask": [[0, 1, 1], [0, 0, 1], [1, 1]],
}
dataset = pack_dataset(dataset, seq_length, strategy="wrapped")
num_examples = len(examples[next(iter(examples))])
self.assertEqual(next(iter(dataset.batch(batch_size=num_examples))), expected_output)
class TestPackDatasetBfd(TrlTestCase):
def test_simple(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples)
seq_length = 4
expected_output = {
"input_ids": [[4, 5, 6, 7], [1, 2, 3, 8]],
"attention_mask": [[0, 0, 1, 1], [0, 1, 1, 1]],
"seq_lengths": [[4], [3, 1]],
}
dataset = pack_dataset(dataset, seq_length, strategy="bfd")
self.assertEqual(dataset.to_dict(), expected_output)
def test_with_iterable_dataset(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples).to_iterable_dataset()
seq_length = 4
expected_output = {
"input_ids": [[4, 5, 6, 7], [1, 2, 3, 8]],
"attention_mask": [[0, 0, 1, 1], [0, 1, 1, 1]],
"seq_lengths": [[4], [3, 1]],
}
dataset = pack_dataset(dataset, seq_length, strategy="bfd")
num_examples = len(examples[next(iter(examples))])
self.assertEqual(next(iter(dataset.batch(batch_size=num_examples))), expected_output)
def test_with_truncation(self):
examples = {
"input_ids": [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10, 11], [12]],
"attention_mask": [[1, 1, 1, 1, 1], [1, 1], [1, 1, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples)
seq_length = 4
expected_output = {
"input_ids": [[1, 2, 3, 4], [8, 9, 10, 11], [6, 7, 12]],
"attention_mask": [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1]],
"seq_lengths": [[4], [4], [2, 1]],
}
dataset = pack_dataset(dataset, seq_length, strategy="bfd")
self.assertEqual(dataset.to_dict(), expected_output)
def test_with_non_power_of_2(self):
examples = {
"input_ids": [[1, 2, 3, 4, 5], [6], [7, 8, 9, 10], [11, 12, 13]],
"attention_mask": [[1, 0, 0, 1, 1], [0], [0, 1, 0, 0], [1, 0, 1]],
}
dataset = Dataset.from_dict(examples)
seq_length = 5
expected_output = {
"input_ids": [[1, 2, 3, 4, 5], [7, 8, 9, 10, 6], [11, 12, 13]],
"attention_mask": [[1, 0, 0, 1, 1], [0, 1, 0, 0, 0], [1, 0, 1]],
"seq_lengths": [[5], [4, 1], [3]],
}
dataset = pack_dataset(dataset, seq_length, strategy="bfd")
self.assertEqual(dataset.to_dict(), expected_output)
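# Note: as the name suggests, the "bfd" strategy packs with a best-fit-decreasing
# heuristic: sequences are placed longest-first into the existing bin that leaves
# the least remaining room (opening a new bin otherwise), and "seq_lengths"
# records the original sequence boundaries inside each packed row.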
class TestTruncateExamples(TrlTestCase):
def test_with_dataset(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples)
max_length = 2
expected_output = {
"input_ids": [[1, 2], [4, 5], [8]],
"attention_mask": [[0, 1], [0, 0], [1]],
}
dataset = truncate_dataset(dataset, max_length)
self.assertEqual(dataset.to_dict(), expected_output)
def test_with_iterable_dataset(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
}
dataset = Dataset.from_dict(examples).to_iterable_dataset()
max_length = 2
expected_output = {
"input_ids": [[1, 2], [4, 5], [8]],
"attention_mask": [[0, 1], [0, 0], [1]],
}
dataset = truncate_dataset(dataset, max_length)
num_examples = len(examples[next(iter(examples))])
self.assertEqual(next(iter(dataset.batch(batch_size=num_examples))), expected_output)
def test_with_extra_column(self):
examples = {
"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
"attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
"my_column": ["a", "b", "c"],
}
dataset = Dataset.from_dict(examples)
max_length = 2
expected_output = {
"input_ids": [[1, 2], [4, 5], [8]],
"attention_mask": [[0, 1], [0, 0], [1]],
"my_column": ["a", "b", "c"],
}
dataset = truncate_dataset(dataset, max_length)
self.assertEqual(dataset.to_dict(), expected_output)
class TestMaybeConvertToChatML(TrlTestCase):
def test_with_conversations_key(self):
# Particular case where the key is "conversations": we rename it to "messages"
example = {
"conversations": [
{"from": "user", "value": "What color is the sky?"},
{"from": "assistant", "value": "It is blue."},
]
}
expected_output = {
"messages": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
]
}
self.assertEqual(maybe_convert_to_chatml(example), expected_output)
def test_without_conversations_key(self):
# Same conversion as before, but without a "conversations" key there is nothing to rename
example = {
"prompt": [{"from": "user", "value": "What color is the sky?"}],
"completion": [{"from": "assistant", "value": "It is blue."}],
}
expected_output = {
"prompt": [{"role": "user", "content": "What color is the sky?"}],
"completion": [{"role": "assistant", "content": "It is blue."}],
}
self.assertEqual(maybe_convert_to_chatml(example), expected_output)
def test_not_conversational(self):
# When not needed, the example should remain unchanged
example = {"text": "The sky is blue."}
self.assertEqual(maybe_convert_to_chatml(example), example)
def test_already_chatml(self):
# When the example is already in ChatML format, it should remain unchanged
example = {
"messages": [
{"role": "user", "content": "What color is the sky?"},
{"role": "assistant", "content": "It is blue."},
]
}
self.assertEqual(maybe_convert_to_chatml(example), example)
# Run the tests
if __name__ == "__main__":
unittest.main()
| trl/tests/test_data_utils.py/0 | {
"file_path": "trl/tests/test_data_utils.py",
"repo_id": "trl",
"token_count": 18808
} | 641 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
import torch
from datasets import Dataset, load_dataset
from parameterized import parameterized
from transformers import AutoModelForTokenClassification, AutoTokenizer, PreTrainedTokenizerBase
from transformers.testing_utils import require_peft
from transformers.utils import is_peft_available
from trl import PRMConfig, PRMTrainer
from .testing_utils import TrlTestCase
if is_peft_available():
from peft import LoraConfig, TaskType
class TestTokenizeRow(TrlTestCase):
def setUp(self):
super().setUp()
# Set up the mock tokenizer with specific behaviors
self.tokenizer = MagicMock(spec=PreTrainedTokenizerBase)
self.tokenizer.bos_token_id = 0
self.tokenizer.eos_token_id = 2
def mock_encode(text, add_special_tokens):
token_map = {
"Which number is larger, 9.8 or 9.11?": [465, 6766, 318, 298],
"11 is greater than 8.": [4, 322, 12],
"Hence, 9.11 > 9.8.": [4995, 11, 22],
"\n": [1030],
"\n\n": [1030, 1030],
}
return token_map[text]
def mock_tokenizer_call(text, add_special_tokens):
return {"input_ids": mock_encode(text, add_special_tokens)}
self.tokenizer.encode.side_effect = mock_encode
self.tokenizer.side_effect = mock_tokenizer_call
def test_tokenize_row_no_truncation(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
# Call the method with no truncation
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n",
max_length=None,
max_prompt_length=None,
max_completion_length=None,
train_on_last_step_only=False,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [0, 465, 6766, 318, 298, 4, 322, 12, 1030, 4995, 11, 22, 1030],
"labels": [-100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100, -100, 0],
},
)
def test_tokenize_row_train_on_last_step_only(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n",
max_length=None,
max_prompt_length=None,
max_completion_length=None,
train_on_last_step_only=True,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [0, 465, 6766, 318, 298, 4, 322, 12, 1030, 4995, 11, 22, 1030],
"labels": [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 0],
},
)
def test_tokenize_row_prompt_truncation(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
# Call the method with truncation on the completion
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n",
max_length=None,
max_prompt_length=3,
max_completion_length=None,
train_on_last_step_only=False,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [6766, 318, 298, 4, 322, 12, 1030, 4995, 11, 22, 1030],
"labels": [-100, -100, -100, -100, -100, -100, 1, -100, -100, -100, 0],
},
)
def test_tokenize_row_completion_truncation(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
# Call the method with truncation on the completion
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n",
max_length=None,
max_prompt_length=None,
max_completion_length=6,
train_on_last_step_only=False,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [0, 465, 6766, 318, 298, 4, 322, 12, 1030, 4995, 11],
"labels": [-100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100],
},
)
def test_tokenize_row_prompt_completion_truncation(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
# Call the method with truncation on the prompt and completion
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n",
max_length=9,
max_prompt_length=None,
max_completion_length=None,
train_on_last_step_only=False,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [0, 465, 6766, 318, 298, 4, 322, 12, 1030],
"labels": [-100, -100, -100, -100, -100, -100, -100, -100, 1],
},
)
def test_tokenize_row_multi_token_separator(self):
# Define the input features
features = {
"prompt": "Which number is larger, 9.8 or 9.11?",
"completions": ["11 is greater than 8.", "Hence, 9.11 > 9.8."],
"labels": [True, False],
}
# Call the method using multiple tokens as step_separator
result = PRMTrainer.tokenize_row(
features=features,
tokenizer=self.tokenizer,
step_separator="\n\n",
max_length=None,
max_prompt_length=None,
max_completion_length=None,
train_on_last_step_only=False,
is_eval=False,
)
self.assertEqual(
result,
{
"input_ids": [0, 465, 6766, 318, 298, 4, 322, 12, 1030, 1030, 4995, 11, 22, 1030, 1030],
"labels": [-100, -100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100, -100, -100, 0],
},
)
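# Reading the expected tensors above: -100 marks positions ignored by the loss,
# and each step's boolean label (True -> 1, False -> 0) sits on the final token
# of that step's separator, which is the position the PRM learns to score.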
class PRMTrainerTester(TrlTestCase):
def setUp(self):
super().setUp()
model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForTokenClassification.from_pretrained(model_id)
self.tokenizer = AutoTokenizer.from_pretrained(model_id)
@parameterized.expand([True, False])
def test_train_full(self, train_on_last_step_only):
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_stepwise_supervision", split="train")
training_args = PRMConfig(
output_dir=self.tmp_dir,
report_to="none",
train_on_last_step_only=train_on_last_step_only,
)
trainer = PRMTrainer(
model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12))
def test_train_full_pretokenized(self):
dummy_dataset = Dataset.from_dict(
{
"labels": [
[-100, -100, -100, -100, -100, -100, -100, -100, -100, 0, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, -100, -100, 0, -100, -100, 1, -100, -100, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 0, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, -100, 1, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100, -100, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, 0, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, 0, -100, -100, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, 0],
[-100, -100, -100, -100, -100, -100, -100, -100, 1],
[-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 0],
],
"input_ids": [
[46518, 374, 2664, 1091, 11, 1077, 752, 1744, 1112, 198, 27261, 13, 198],
[98923, 374, 2664, 1091, 11, 315, 3308, 11, 198, 17995, 13, 198, 1576, 31273, 12850, 13, 198],
[16374, 374, 2664, 1091, 1112, 1077, 594, 2506, 432, 6770, 11, 198, 6351, 13, 198],
[31137, 374, 2664, 1091, 979, 4362, 11, 198, 16965, 13, 198],
[31019, 374, 2664, 1091, 304, 3793, 315, 5944, 11, 198, 24034, 13, 198],
[98491, 374, 2664, 1091, 1112, 5310, 369, 91494, 13, 198],
[4418, 2897, 14579, 5310, 979, 3800, 1349, 432, 13, 198],
[20366, 5048, 7629, 944, 3281, 3322, 11, 7241, 1112, 198, 807, 1795, 279, 5601, 13, 198],
[15802, 14976, 487, 33327, 1045, 31787, 63443, 11, 198, 52400, 13, 198],
[13877, 1265, 2581, 1494, 49394, 11, 198, 7241, 20975, 91681, 13, 198],
[641, 279, 3579, 315, 71768, 11, 25066, 279, 61361, 311, 7942, 13, 198],
[7039, 374, 2664, 1091, 2937, 13, 198],
[26155, 374, 3545, 2664, 1091, 34933, 26537, 13, 198],
[2679, 279, 8129, 374, 4135, 311, 10339, 11, 432, 2578, 387, 264, 1661, 2884, 13, 198],
],
}
)
training_args = PRMConfig(output_dir=self.tmp_dir, report_to="none")
trainer = PRMTrainer(
model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12))
@require_peft
def test_train_lora(self):
peft_config = LoraConfig(
task_type=TaskType.TOKEN_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_stepwise_supervision", split="train")
training_args = PRMConfig(output_dir=self.tmp_dir, max_steps=3, report_to="none")
trainer = PRMTrainer(
model=self.model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset,
peft_config=peft_config,
)
previous_trainable_params = {}
previous_non_trainable_params = {}
# Include "modules_to_save" due to a change in the way modules to save are handled in PEFT.
trainable_params_name = ["lora", "modules_to_save"]
# Snapshot trainable and non-trainable parameters before training
for n, param in trainer.model.named_parameters():
if any(t in n for t in trainable_params_name):
previous_trainable_params[n] = param.clone()
else:
previous_non_trainable_params[n] = param.clone()
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12))
# Check that the non trainable parameters have not changed
for n, param in previous_non_trainable_params.items():
new_param = trainer.model.get_parameter(n)
self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12))
def test_tags(self):
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_stepwise_supervision", split="train")
training_args = PRMConfig(output_dir=self.tmp_dir, report_to="none")
trainer = PRMTrainer(
model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset
)
self.assertEqual(trainer.model.model_tags, trainer._tag_names)
| trl/tests/test_prm_trainer.py/0 | {
"file_path": "trl/tests/test_prm_trainer.py",
"repo_id": "trl",
"token_count": 7464
} | 642 |
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: "NO"
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: 'bf16'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
| trl/trl/accelerate_configs/single_gpu.yaml/0 | {
"file_path": "trl/trl/accelerate_configs/single_gpu.yaml",
"repo_id": "trl",
"token_count": 128
} | 643 |
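For reference, a config like this one is consumed by Accelerate's launcher, e.g. `accelerate launch --config_file trl/trl/accelerate_configs/single_gpu.yaml train.py` (the training script name here is a placeholder); with `distributed_type: "NO"` and a single process, it runs the script on one GPU.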
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import torchvision
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import EntryNotFoundError
from transformers import CLIPModel, is_torch_npu_available, is_torch_xpu_available
class MLP(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(768, 1024),
nn.Dropout(0.2),
nn.Linear(1024, 128),
nn.Dropout(0.2),
nn.Linear(128, 64),
nn.Dropout(0.1),
nn.Linear(64, 16),
nn.Linear(16, 1),
)
def forward(self, embed):
return self.layers(embed)
class AestheticScorer(torch.nn.Module):
"""
This model attempts to predict the aesthetic score of an image. The aesthetic score is a numerical approximation of
how much a specific image is liked by humans on average. This is from
https://github.com/christophschuhmann/improved-aesthetic-predictor
"""
def __init__(self, *, dtype, model_id, model_filename):
super().__init__()
self.clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
self.normalize = torchvision.transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]
)
self.target_size = 224
self.mlp = MLP()
try:
cached_path = hf_hub_download(model_id, model_filename)
except EntryNotFoundError:
cached_path = os.path.join(model_id, model_filename)
state_dict = torch.load(cached_path, map_location=torch.device("cpu"), weights_only=True)
self.mlp.load_state_dict(state_dict)
self.dtype = dtype
self.eval()
def __call__(self, images):
device = next(self.parameters()).device
images = torchvision.transforms.Resize(self.target_size)(images)
images = self.normalize(images).to(self.dtype).to(device)
embed = self.clip.get_image_features(pixel_values=images)
# normalize embedding
embed = embed / torch.linalg.vector_norm(embed, dim=-1, keepdim=True)
reward = self.mlp(embed).squeeze(1)
return reward
def aesthetic_scorer(hub_model_id, model_filename):
scorer = AestheticScorer(
model_id=hub_model_id,
model_filename=model_filename,
dtype=torch.float32,
)
if is_torch_npu_available():
scorer = scorer.npu()
elif is_torch_xpu_available():
scorer = scorer.xpu()
else:
scorer = scorer.cuda()
def _fn(images, prompts, metadata):
images = images.clamp(0, 1)
scores = scorer(images)
return scores, {}
return _fn
| trl/trl/models/auxiliary_modules.py/0 | {
"file_path": "trl/trl/models/auxiliary_modules.py",
"repo_id": "trl",
"token_count": 1380
} | 644 |
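A minimal usage sketch for the module above, assuming a CUDA machine (on plain CPU hosts the `.cuda()` call would fail); the hub repo id and checkpoint filename are placeholders, not verified endpoints, and the import path is taken from this row's `file_path`:

```python
# Hypothetical usage sketch for aesthetic_scorer; substitute a real
# aesthetic-predictor checkpoint for the placeholder repo id and filename.
import torch
from trl.models.auxiliary_modules import aesthetic_scorer  # path assumed from file_path

reward_fn = aesthetic_scorer(
    hub_model_id="your-org/aesthetic-predictor",  # placeholder repo id
    model_filename="aesthetic-model.pth",         # placeholder filename
)
images = torch.rand(2, 3, 256, 256)  # a batch of RGB images; values clamped to [0, 1]
scores, extra = reward_fn(images, prompts=None, metadata=None)
print(scores.shape)  # torch.Size([2]) -- one aesthetic score per image
```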
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import inspect
import logging
import os
import subprocess
import sys
from collections.abc import Iterable
from dataclasses import dataclass, field
from typing import Optional, Union
import datasets
import yaml
from datasets import DatasetDict, concatenate_datasets
from transformers import HfArgumentParser
from transformers.hf_argparser import DataClass, DataClassType
from transformers.utils import is_rich_available
logger = logging.getLogger(__name__)
@dataclass
class DatasetConfig:
"""
Configuration for a dataset.
This class matches the signature of [`~datasets.load_dataset`] and the arguments are used directly in the
`datasets.load_dataset` function. You can refer to the `datasets.load_dataset` documentation for more details.
Parameters:
path (`str`):
Path or name of the dataset.
name (`str`, *optional*, defaults to `None`):
Defining the name of the dataset configuration.
data_dir (`str`, *optional*, defaults to `None`):
Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text, etc.)
or the Hub datasets and `data_files` is `None`, the behavior is equal to passing `os.path.join(data_dir,
**)` as `data_files` to reference all the files in a directory.
data_files (`str` or `Sequence` or `Mapping`, *optional*, defaults to `None`):
Path(s) to source data file(s).
split (`str`, *optional*, defaults to `"train"`):
Which split of the data to load.
columns (`list[str]`, *optional*, defaults to `None`):
List of column names to select from the dataset. If `None`, all columns are selected.
"""
path: str
name: Optional[str] = None
data_dir: Optional[str] = None
data_files: Optional[Union[str, list[str], dict[str, str]]] = None
split: str = "train"
columns: Optional[list[str]] = None
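# Illustration (an assumption based on the docstring above, not part of the class):
# DatasetConfig(path="stanfordnlp/imdb", split="train") mirrors
# datasets.load_dataset("stanfordnlp/imdb", split="train"); the extra `columns`
# field is only used afterwards to subset the loaded columns.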
@dataclass
class DatasetMixtureConfig:
"""
Configuration class for a mixture of datasets.
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
datasets (`list[DatasetConfig]`):
List of dataset configurations to include in the mixture.
streaming (`bool`, *optional*, defaults to `False`):
Whether to stream the datasets. If `True`, the datasets will be loaded in streaming mode.
test_split_size (`float` or `None`, *optional*, defaults to `None`):
Size of the test split. Refer to the `test_size` parameter in the [`~datasets.train_test_split`] function
for more details. If `None`, the dataset will not be split into train and test sets.
Usage:
When using the CLI, you can add the following section to your YAML config file:
```yaml
datasets:
- path: ...
name: ...
data_dir: ...
data_files: ...
split: ...
columns: ...
- path: ...
name: ...
data_dir: ...
data_files: ...
split: ...
columns: ...
streaming: ...
test_split_size: ...
```
"""
datasets: list[DatasetConfig] = field(
default_factory=list,
metadata={"help": "List of dataset configurations to include in the mixture."},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the datasets. If True, the datasets will be loaded in streaming mode."},
)
test_split_size: Optional[float] = field(
default=None,
metadata={
"help": "Size of the test split. Refer to the `test_size` parameter in the `datasets.train_test_split` "
"function for more details. If None, the dataset will not be split into train and test sets."
},
)
def __post_init__(self):
# Convert any dataset dicts (from CLI/config parsing) into DatasetConfig objects
for idx, dataset in enumerate(self.datasets):
if isinstance(dataset, dict):
# If it's a dict, convert it to DatasetConfig
self.datasets[idx] = DatasetConfig(**dataset)
@dataclass
class ScriptArguments:
"""
Arguments common to all scripts.
Args:
dataset_name (`str`, or `None`, *optional*, defaults to `None`):
Path or name of the dataset to load. If `datasets` is provided, this will be ignored.
dataset_config (`str` or `None`, *optional*, defaults to `None`):
Dataset configuration name. Corresponds to the `name` argument of the [`~datasets.load_dataset`] function.
If `datasets` is provided, this will be ignored.
dataset_train_split (`str`, *optional*, defaults to `"train"`):
Dataset split to use for training. If `datasets` is provided, this will be ignored.
dataset_test_split (`str`, *optional*, defaults to `"test"`):
Dataset split to use for evaluation. If `datasets` is provided, this will be ignored.
dataset_streaming (`bool`, *optional*, defaults to `False`):
Whether to stream the dataset. If True, the dataset will be loaded in streaming mode. If `datasets` is
provided, this will be ignored.
gradient_checkpointing_use_reentrant (`bool`, *optional*, defaults to `False`):
Whether to apply `use_reentrant` for gradient checkpointing.
ignore_bias_buffers (`bool`, *optional*, defaults to `False`):
Debug argument for distributed training. Fix for DDP issues with LM bias/mask buffers - invalid scalar
type, inplace operation. See
https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992.
"""
dataset_name: Optional[str] = field(
default=None,
metadata={"help": "Path or name of the dataset to load. If `datasets` is provided, this will be ignored."},
)
dataset_config: Optional[str] = field(
default=None,
metadata={
"help": "Dataset configuration name. Corresponds to the `name` argument of the `datasets.load_dataset` "
"function. If `datasets` is provided, this will be ignored."
},
)
dataset_train_split: str = field(
default="train",
metadata={"help": "Dataset split to use for training. If `datasets` is provided, this will be ignored."},
)
dataset_test_split: str = field(
default="test",
metadata={"help": "Dataset split to use for evaluation. If `datasets` is provided, this will be ignored."},
)
dataset_streaming: bool = field(
default=False,
metadata={
"help": "Whether to stream the dataset. If True, the dataset will be loaded in streaming mode. If "
"`datasets` is provided, this will be ignored."
},
)
gradient_checkpointing_use_reentrant: bool = field(
default=False,
metadata={"help": "Whether to apply `use_reentrant` for gradient checkpointing."},
)
ignore_bias_buffers: bool = field(
default=False,
metadata={
"help": "Debug argument for distributed training. Fix for DDP issues with LM bias/mask buffers - invalid "
"scalar type, inplace operation. See "
"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992."
},
)
def init_zero_verbose():
"""
Perform zero verbose init - use this method on top of the CLI modules to make logging and warning output cleaner.
Uses Rich if available, falls back otherwise.
"""
import logging
import warnings
FORMAT = "%(message)s"
if is_rich_available():
from rich.logging import RichHandler
handler = RichHandler()
else:
handler = logging.StreamHandler()
logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[handler], level=logging.ERROR)
# Custom warning handler to redirect warnings to the logging system
def warning_handler(message, category, filename, lineno, file=None, line=None):
logging.warning(f"{filename}:{lineno}: {category.__name__}: {message}")
# Add the custom warning handler - we need to do that before importing anything to make sure the loggers work well
warnings.showwarning = warning_handler
class TrlParser(HfArgumentParser):
"""
A subclass of [`transformers.HfArgumentParser`] designed for parsing command-line arguments with dataclass-backed
configurations, while also supporting configuration file loading and environment variable management.
Args:
dataclass_types (`Union[DataClassType, Iterable[DataClassType]]` or `None`, *optional*, defaults to `None`):
Dataclass types to use for argument parsing.
**kwargs:
Additional keyword arguments passed to the [`transformers.HfArgumentParser`] constructor.
Examples:
```yaml
# config.yaml
env:
VAR1: value1
arg1: 23
```
```python
# main.py
import os
from dataclasses import dataclass
from trl import TrlParser
@dataclass
class MyArguments:
arg1: int
arg2: str = "alpha"
parser = TrlParser(dataclass_types=[MyArguments])
training_args = parser.parse_args_and_config()
print(training_args, os.environ.get("VAR1"))
```
```bash
$ python main.py --config config.yaml
(MyArguments(arg1=23, arg2='alpha'),) value1
$ python main.py --arg1 5 --arg2 beta
(MyArguments(arg1=5, arg2='beta'),) None
```
"""
def __init__(
self,
dataclass_types: Optional[Union[DataClassType, Iterable[DataClassType]]] = None,
**kwargs,
):
# Make sure dataclass_types is an iterable
if dataclass_types is None:
dataclass_types = []
elif not isinstance(dataclass_types, Iterable):
dataclass_types = [dataclass_types]
# Check that none of the dataclasses have the "config" field
for dataclass_type in dataclass_types:
if "config" in dataclass_type.__dataclass_fields__:
raise ValueError(
f"Dataclass {dataclass_type.__name__} has a field named 'config'. This field is reserved for the "
f"config file path and should not be used in the dataclass."
)
super().__init__(dataclass_types=dataclass_types, **kwargs)
def parse_args_and_config(
self,
args: Optional[Iterable[str]] = None,
return_remaining_strings: bool = False,
fail_with_unknown_args: bool = True,
) -> tuple[DataClass, ...]:
"""
Parse command-line args and config file into instances of the specified dataclass types.
This method wraps [`transformers.HfArgumentParser.parse_args_into_dataclasses`] and also parses the config file
specified with the `--config` flag. The config file (in YAML format) provides argument values that replace the
default values in the dataclasses. Command line arguments can override values set by the config file. The
method also sets any environment variables specified in the `env` field of the config file.
"""
args = list(args) if args is not None else sys.argv[1:]
if "--config" in args:
# Get the config file path from the command-line arguments
config_index = args.index("--config")
args.pop(config_index) # remove the --config flag
config_path = args.pop(config_index) # get the path to the config file
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
# Set the environment variables specified in the config file
if "env" in config:
env_vars = config.pop("env", {})
if not isinstance(env_vars, dict):
raise ValueError("`env` field should be a dict in the YAML file.")
for key, value in env_vars.items():
os.environ[key] = str(value)
# Set the defaults from the config values
config_remaining_strings = self.set_defaults_with_config(**config)
else:
config_remaining_strings = []
# Parse the arguments from the command line
output = self.parse_args_into_dataclasses(args=args, return_remaining_strings=return_remaining_strings)
# Merge remaining strings from the config file with the remaining strings from the command line
if return_remaining_strings:
args_remaining_strings = output[-1]
return output[:-1] + (config_remaining_strings + args_remaining_strings,)
elif fail_with_unknown_args and config_remaining_strings:
raise ValueError(
f"Unknown arguments from config file: {config_remaining_strings}. Please remove them, add them to the "
"dataclass, or set `fail_with_unknown_args=False`."
)
else:
return output
def set_defaults_with_config(self, **kwargs) -> list[str]:
"""
Overrides the parser's default values with those provided via keyword arguments, including for subparsers.
Any argument with an updated default will also be marked as not required if it was previously required.
Returns a list of strings that were not consumed by the parser.
"""
def apply_defaults(parser, kw):
used_keys = set()
for action in parser._actions:
# Handle subparsers recursively
if isinstance(action, argparse._SubParsersAction):
for subparser in action.choices.values():
used_keys.update(apply_defaults(subparser, kw))
elif action.dest in kw:
action.default = kw[action.dest]
action.required = False
used_keys.add(action.dest)
return used_keys
used_keys = apply_defaults(self, kwargs)
# Remaining args not consumed by the parser
remaining = [
item for key, value in kwargs.items() if key not in used_keys for item in (f"--{key}", str(value))
]
return remaining
def get_git_commit_hash(package_name):
try:
# Import the package to locate its path
package = importlib.import_module(package_name)
# Get the path to the package using inspect
package_path = os.path.dirname(inspect.getfile(package))
# Navigate up to the Git repository root if the package is inside a subdirectory
git_repo_path = os.path.abspath(os.path.join(package_path, ".."))
git_dir = os.path.join(git_repo_path, ".git")
if os.path.isdir(git_dir):
# Run the git command to get the current commit hash
commit_hash = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=git_repo_path).strip().decode("utf-8")
)
return commit_hash
else:
return None
except Exception as e:
return f"Error: {str(e)}"
def get_dataset(mixture_config: DatasetMixtureConfig) -> DatasetDict:
"""
Load a mixture of datasets based on the configuration.
Args:
mixture_config (`DatasetMixtureConfig`):
Script arguments containing dataset configuration.
Returns:
`DatasetDict`:
Combined dataset(s) from the mixture configuration, with optional train/test split if `test_split_size` is
set.
Example:
```python
from trl import DatasetMixtureConfig, get_dataset
from trl.scripts.utils import DatasetConfig
mixture_config = DatasetMixtureConfig(datasets=[DatasetConfig(path="trl-lib/tldr")])
dataset = get_dataset(mixture_config)
print(dataset)
```
```
DatasetDict({
train: Dataset({
features: ['prompt', 'completion'],
num_rows: 116722
})
})
```
"""
logger.info(f"Creating dataset mixture with {len(mixture_config.datasets)} datasets")
datasets_list = []
for dataset_config in mixture_config.datasets:
logger.info(f"Loading dataset for mixture: {dataset_config.path} (config name: {dataset_config.name})")
dataset = datasets.load_dataset(
path=dataset_config.path,
name=dataset_config.name,
data_dir=dataset_config.data_dir,
data_files=dataset_config.data_files,
split=dataset_config.split,
streaming=mixture_config.streaming,
)
if dataset_config.columns is not None:
dataset = dataset.select_columns(dataset_config.columns)
datasets_list.append(dataset)
if datasets_list:
combined_dataset = concatenate_datasets(datasets_list)
if isinstance(combined_dataset, datasets.Dataset): # IterableDataset does not have a length
logger.info(f"Created dataset mixture with {len(combined_dataset)} examples")
if mixture_config.test_split_size is not None:
logger.info(f"Splitting dataset into train and test sets with test size: {mixture_config.test_split_size}")
combined_dataset = combined_dataset.train_test_split(test_size=mixture_config.test_split_size)
return combined_dataset
else:
return DatasetDict({"train": combined_dataset})
else:
raise ValueError("No datasets were loaded from the mixture configuration")
| trl/trl/scripts/utils.py/0 | {
"file_path": "trl/trl/scripts/utils.py",
"repo_id": "trl",
"token_count": 7267
} | 645 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import textwrap
from typing import Any, Callable, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import Dataset
from transformers import (
AutoModelForCausalLM,
BaseImageProcessor,
DataCollator,
FeatureExtractionMixin,
GenerationConfig,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
is_wandb_available,
)
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalPrediction
from transformers.utils import is_peft_available
from ..models import prepare_deepspeed
from ..models.utils import unwrap_model_for_generation
from .gkd_config import GKDConfig
from .sft_trainer import SFTTrainer
from .utils import (
DataCollatorForChatML,
disable_dropout_in_model,
empty_cache,
generate_model_card,
get_comet_experiment_url,
)
if is_peft_available():
from peft import PeftConfig
if is_wandb_available():
import wandb
class GKDTrainer(SFTTrainer):
_tag_names = ["trl", "gkd"]
def __init__(
self,
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
teacher_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
args: Optional[GKDConfig] = None,
data_collator: Optional[DataCollator] = None, # type: ignore
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
] = None,
compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
callbacks: Optional[list[TrainerCallback]] = None,
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
peft_config: Optional["PeftConfig"] = None,
formatting_func: Optional[Callable] = None,
):
# add remove_unused_columns=False to the dataclass args
args.remove_unused_columns = False
data_collator = DataCollatorForChatML(tokenizer=processing_class, max_length=args.max_length)
super().__init__(
model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
peft_config=peft_config,
formatting_func=formatting_func,
)
if args.teacher_model_init_kwargs is None:
teacher_model_init_kwargs = {}
elif not isinstance(teacher_model, str):
raise ValueError(
"You passed teacher_model_init_kwargs to the GKDConfig, but your teacher_model is already instantiated."
)
else:
teacher_model_init_kwargs = args.teacher_model_init_kwargs
teacher_model_init_kwargs["torch_dtype"] = (
teacher_model_init_kwargs["torch_dtype"]
if teacher_model_init_kwargs["torch_dtype"] in ["auto", None]
else getattr(torch, teacher_model_init_kwargs["torch_dtype"])
)
if isinstance(teacher_model, str):
teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model, **teacher_model_init_kwargs)
# Disable dropout in the model
if args.disable_dropout:
disable_dropout_in_model(self.model)
if self.is_deepspeed_enabled:
self.teacher_model = prepare_deepspeed(teacher_model, self.accelerator)
else:
self.teacher_model = self.accelerator.prepare_model(teacher_model, evaluation_mode=True)
self.lmbda = args.lmbda
self.beta = args.beta
self.temperature = args.temperature
self.seq_kd = args.seq_kd
self.generation_config = GenerationConfig(
max_new_tokens=args.max_new_tokens,
temperature=args.temperature,
do_sample=True,
top_k=0,
use_cache=False if args.gradient_checkpointing else True,
pad_token_id=self.processing_class.pad_token_id,
)
# Set custom EOS tokens if they are specified by the model's generation
# config. This is important for models with the Llama 3 chat template,
# which use special tokens <|eot_id|> and <|eom_id|> to mark the end of
# turns or messages.
if (
hasattr(self.model.generation_config, "eos_token_id")
and self.model.generation_config.eos_token_id is not None
):
self.generation_config.eos_token_id = self.model.generation_config.eos_token_id
@staticmethod
def generalized_jsd_loss(
student_logits, teacher_logits, labels=None, beta=0.5, temperature=1.0, reduction="batchmean"
):
"""
Compute the generalized Jensen-Shannon Divergence loss for knowledge distillation using F.kl_div. See Eq. (1)
of https://huggingface.co/papers/2306.13649 for the definition.
Args:
student_logits:
Tensor of shape (batch_size, sequence_length, vocab_size)
teacher_logits:
Tensor of shape (batch_size, sequence_length, vocab_size)
labels:
Tensor of shape (batch_size, sequence_length) with -100 for padding tokens to ignore when computing
loss
beta:
Interpolation coefficient between 0 and 1 (default: 0.5)
temperature:
Softmax temperature (default: 1.0)
reduction:
Specifies the reduction to apply to the output (default: 'batchmean')
Returns:
loss: Scalar tensor with the generalized JSD loss
"""
# Apply temperature scaling
student_logits = student_logits / temperature
teacher_logits = teacher_logits / temperature
# Compute log probabilities for student and probabilities for teacher
student_log_probs = F.log_softmax(student_logits, dim=-1)
teacher_log_probs = F.log_softmax(teacher_logits, dim=-1)
if beta == 0:
jsd = F.kl_div(student_log_probs, teacher_log_probs, reduction="none", log_target=True)
elif beta == 1:
jsd = F.kl_div(teacher_log_probs, student_log_probs, reduction="none", log_target=True)
else:
# Compute the log of the mixture distribution
# log(a + b) = log(exp(log(a)) + exp(log(b))) -> for mixture
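# i.e. the mixture M = (1 - beta) * P_student + beta * P_teacher, computed in log-space for numerical stability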
beta = torch.tensor(beta, dtype=student_log_probs.dtype)
mixture_log_probs = torch.logsumexp(
torch.stack([student_log_probs + torch.log(1 - beta), teacher_log_probs + torch.log(beta)]),
dim=0,
)
# Compute KL divergences using F.kl_div
# PyTorch's F.kl_div(input, target) computes KL(target || input), which differs from the standard mathematical argument order, so the probability distributions are swapped compared to the definition in the paper.
kl_teacher = F.kl_div(mixture_log_probs, teacher_log_probs, reduction="none", log_target=True)
kl_student = F.kl_div(mixture_log_probs, student_log_probs, reduction="none", log_target=True)
# Compute the Generalized Jensen-Shannon Divergence
jsd = beta * kl_teacher + (1 - beta) * kl_student
# Masking
if labels is not None:
mask = labels != -100
jsd = jsd[mask]
# Apply reduction
if reduction == "batchmean":
return jsd.sum() / mask.sum() if labels is not None else jsd.sum() / (jsd.size(0) * jsd.size(1))
elif reduction == "sum":
return jsd.sum()
elif reduction == "mean":
return jsd.mean()
else:
return jsd
def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
# compute student output
outputs_student = model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
)
# compute teacher output in eval mode
self.teacher_model.eval()
with torch.no_grad():
outputs_teacher = self.teacher_model(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
)
# slice the logits for the generated tokens using the inputs["prompts"] lengths
prompt_lengths = inputs["prompts"].shape[1]
shifted_student_logits = outputs_student.logits[:, prompt_lengths - 1 : -1, :]
shifted_teacher_logits = outputs_teacher.logits[:, prompt_lengths - 1 : -1, :]
shifted_labels = inputs["labels"][:, prompt_lengths:]
# compute loss
loss = self.generalized_jsd_loss(
student_logits=shifted_student_logits,
teacher_logits=shifted_teacher_logits,
labels=shifted_labels,
beta=self.beta,
)
# empty cache
empty_cache()
# Return loss
return (loss, outputs_student) if return_outputs else loss
@staticmethod
def generate_on_policy_outputs(model, inputs, generation_config, pad_token_id=None):
# Generate output conditioned on the prompt only
generated_outputs = model.generate(
input_ids=inputs["prompts"],
attention_mask=inputs.get("prompt_attention_mask", None),
generation_config=generation_config,
return_dict_in_generate=True,
)
# Get the generated token IDs
generated_tokens = generated_outputs.sequences
# Calculate new attention mask
new_attention_mask = torch.ones_like(generated_tokens)
new_labels = generated_tokens.clone()
# If there's pad_token_id, set attention mask to 0 for padding tokens
if pad_token_id is not None:
new_labels[new_labels == pad_token_id] = -100
new_attention_mask[generated_tokens == pad_token_id] = 0
return generated_tokens, new_attention_mask, new_labels
def training_step(
self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
) -> torch.Tensor:
"""
Perform a training step for the Generalized Knowledge Distillation (GKD) model.
This method implements the on-policy learning approach described in the GKD paper. With probability
`self.lmbda`, it generates new responses using the student model, which are then used for training instead of
the original inputs.
"""
if self.seq_kd:
with unwrap_model_for_generation(self.teacher_model, self.accelerator) as unwrapped_model:
new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs(
unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id
)
inputs["input_ids"] = new_input_ids
inputs["attention_mask"] = new_attention_mask
inputs["labels"] = new_labels
if random.random() <= self.lmbda:
with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
new_input_ids, new_attention_mask, new_labels = self.generate_on_policy_outputs(
unwrapped_model, inputs, self.generation_config, self.processing_class.pad_token_id
)
inputs["input_ids"] = new_input_ids
inputs["attention_mask"] = new_attention_mask
inputs["labels"] = new_labels
loss = super().training_step(model, inputs, num_items_in_batch)
return loss
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
# normalize `tags` to a mutable set
if tags is None:
tags = set()
elif isinstance(tags, str):
tags = {tags}
else:
tags = set(tags)
if hasattr(self.model.config, "unsloth_version"):
tags.add("unsloth")
tags.update(self._tag_names)
# docstyle-ignore
citation = textwrap.dedent("""\
@inproceedings{agarwal2024on-policy,
title = {{On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes}},
author = {Rishabh Agarwal and Nino Vieillard and Yongchao Zhou and Piotr Stanczyk and Sabela Ramos Garea and Matthieu Geist and Olivier Bachem},
year = 2024,
booktitle = {The Twelfth International Conference on Learning Representations, {ICLR} 2024, Vienna, Austria, May 7-11, 2024},
publisher = {OpenReview.net},
url = {https://openreview.net/forum?id=3zKtaqxLhW},
}""")
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="GKD",
trainer_citation=citation,
paper_title="On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes",
paper_id="2306.13649",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/gkd_trainer.py/0 | {
"file_path": "trl/trl/trainer/gkd_trainer.py",
"repo_id": "trl",
"token_count": 6674
} | 646 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import math
import os
import textwrap
import time
from collections import defaultdict
from contextlib import contextmanager, nullcontext
from pathlib import Path
from typing import Optional, Union
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from accelerate import Accelerator
from accelerate.utils import broadcast, gather_object
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
BaseImageProcessor,
DataCollatorWithPadding,
FeatureExtractionMixin,
GenerationConfig,
PreTrainedTokenizerBase,
ProcessorMixin,
Trainer,
TrainerCallback,
TrainerControl,
is_wandb_available,
)
from transformers.integrations import get_reporting_integration_callbacks
from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK
from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback
from transformers.utils import is_peft_available, is_rich_available
from ..core import masked_mean, masked_whiten
from ..models import create_reference_model
from ..models.utils import unwrap_model_for_generation
from .ppo_config import PPOConfig
from .utils import (
OnlineTrainerState,
batch_generation,
disable_dropout_in_model,
empty_cache,
exact_div,
first_true_indices,
forward,
generate_model_card,
get_comet_experiment_url,
get_reward,
log_table_to_comet_experiment,
peft_module_casting_to_bf16,
prepare_deepspeed,
print_rich_table,
selective_log_softmax,
truncate_response,
)
if is_peft_available():
from peft import PeftConfig, PeftModel, get_peft_model
if is_wandb_available():
import wandb
INVALID_LOGPROB = 1.0
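# placeholder log-probability for masked padding positions; 1.0 is impossible for a true log-prob, so accidental use is easy to spot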
# taken from https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/ppo/ppo_trainer.py#L29
# we do this so we can do a single `model = accelerator.prepare(model)`
class PolicyAndValueWrapper(nn.Module):
def __init__(self, policy, value_model) -> None:
super().__init__()
self.policy = policy
self.value_model = value_model
self.critic_backbone = getattr(value_model, value_model.base_model_prefix)
self.is_gradient_checkpointing = policy.is_gradient_checkpointing
def forward(self, **kwargs):
output = self.critic_backbone(**kwargs)
logits = self.value_model.score(output.hidden_states[-1])
return self.policy(**kwargs), logits
class PPOTrainer(Trainer):
_tag_names = ["trl", "ppo"]
def __init__(
self,
args: PPOConfig,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
],
model: nn.Module,
ref_model: Optional[nn.Module],
reward_model: nn.Module,
train_dataset: Dataset,
value_model: nn.Module,
data_collator: Optional[DataCollatorWithPadding] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
# less commonly used
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
callbacks: Optional[list[TrainerCallback]] = None,
peft_config: Optional["PeftConfig"] = None,
) -> None:
if ref_model is model:
raise ValueError(
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
"same as `model`, you must make a copy of it, or `None` if you use peft."
)
self.args = args
self.processing_class = processing_class
self.policy_model = model
# Define the collator if not provided
if data_collator is None:
data_collator = DataCollatorWithPadding(self.processing_class)
# Handle stop token settings: update policy model's generation_config to use provided stop token
if args.stop_token and args.stop_token_id:
raise ValueError("You cannot set both `stop_token` and `stop_token_id`.")
elif args.stop_token:
if args.stop_token == "eos":
self.policy_model.generation_config.eos_token_id = self.stop_token_id = processing_class.eos_token_id
else:
raise ValueError(
f"Unknown `stop_token` {args.stop_token}. Allowed values are: `'eos'` and `None` (no stop token)."
)
else:
self.policy_model.generation_config.eos_token_id = self.stop_token_id = args.stop_token_id # None or int
# Check that the kl estimator is valid
if self.args.kl_estimator not in {"k1", "k3"}:
raise ValueError(
"kl_estimator must be either 'k1' (straightforward, unbiased) or 'k3' (lower variance, unbiased, "
"appears to be a strictly better estimator). See "
"[Approximating KL Divergence](http://joschu.net/blog/kl-approx.html) for details."
)
# peft support
if not is_peft_available() and peft_config is not None:
raise ImportError(
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
)
elif is_peft_available() and peft_config is not None:
# if model is a peft model and we have a peft_config, we merge and unload it first
if isinstance(self.policy_model, PeftModel):
self.policy_model = self.policy_model.merge_and_unload()
# get peft model with the given config
self.policy_model = get_peft_model(self.policy_model, peft_config)
if args.bf16 and getattr(self.policy_model, "is_loaded_in_4bit", False):
peft_module_casting_to_bf16(self.policy_model)
self.is_peft_model = is_peft_available() and isinstance(self.policy_model, PeftModel)
self.model_adapter_name = args.model_adapter_name
self.ref_adapter_name = args.ref_adapter_name
if ref_model:
self.ref_model = ref_model
elif self.is_peft_model:
self.ref_model = None
else:
self.ref_model = create_reference_model(self.policy_model)
self.reward_model = reward_model
self.train_dataset = train_dataset
self.train_dataset_len = len(train_dataset)
self.value_model = value_model
self.data_collator = data_collator
self.eval_dataset = eval_dataset
self.optimizer, self.lr_scheduler = optimizers
self.optimizer_cls_and_kwargs = None # needed for transformers >= 4.47
#########
# calculate various batch sizes
#########
if args.total_episodes is None: # allow the users to define episodes in terms of epochs.
args.total_episodes = int(args.num_train_epochs * self.train_dataset_len)
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps)
self.accelerator = accelerator
args.world_size = accelerator.num_processes
args.local_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps
args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size)
args.batch_size = int(args.local_batch_size * args.world_size)
args.mini_batch_size = exact_div(
args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`"
)
args.local_mini_batch_size = exact_div(
args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`"
)
if args.whiten_rewards:
assert args.local_mini_batch_size >= 8, (
f"Per-rank minibatch size {args.local_mini_batch_size} is insufficient for whitening"
)
# `per_rank_rollout_batch_size` is our `args.local_batch_size`
# `per_rank_minibatch_size` is our `args.local_mini_batch_size`
args.num_total_batches = math.ceil(
args.total_episodes / args.batch_size
) # we may train for more than `total_episodes`
time_tensor = torch.tensor(int(time.time()), device=accelerator.device)
time_int = broadcast(time_tensor, 0).item() # avoid different timestamps across processes
args.run_name = f"{args.exp_name}__{args.seed}__{time_int}"
self.local_seed = args.seed + accelerator.process_index * 100003 # Prime
if args.num_sample_generations > 0:
self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations)
self.local_dataloader_batch_size = args.local_batch_size
#########
# setup model, optimizer, and others
#########
for module in [self.policy_model, self.ref_model, self.value_model, self.reward_model]:
if module is not None:
disable_dropout_in_model(module)
self.model = PolicyAndValueWrapper(self.policy_model, self.value_model)
self.model.config = self.policy_model.config # needed for pushing to hub
self.create_optimizer_and_scheduler(
num_training_steps=args.num_total_batches
) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level
#########
### trainer specifics
#########
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
self.callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
self.control = TrainerControl()
self.state = OnlineTrainerState(
is_local_process_zero=self.is_local_process_zero(),
is_world_process_zero=self.is_world_process_zero(),
stateful_callbacks=[
cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState)
],
)
self.current_flos = 0
self.hp_search_backend = None
self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
# Create distant repo and output directory if needed
self.hub_model_id = None
if self.args.push_to_hub:
self.init_hf_repo()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
# Add tags for models that have been loaded with the correct transformers version
if hasattr(self.model, "add_model_tags"):
self.model.add_model_tags(self._tag_names)
#########
### setup dataloader
#########
self.dataloader = DataLoader(
self.train_dataset,
batch_size=self.local_dataloader_batch_size,
shuffle=True,
collate_fn=self.data_collator,
drop_last=True, # needed; otherwise the last batch will be of ragged shape
)
# sync random states for DataLoader(shuffle=True) before `accelerator.prepare`
# see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c
torch.manual_seed(args.seed)
self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader)
torch.manual_seed(self.local_seed) # reset the local seed again
self.eval_dataloader = DataLoader(
self.eval_dataset,
batch_size=args.per_device_eval_batch_size,
collate_fn=self.data_collator,
drop_last=True,
) # no need to shuffle eval dataset
self.eval_dataloader = accelerator.prepare(self.eval_dataloader)
if self.is_deepspeed_enabled:
self.reward_model = prepare_deepspeed(
self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
)
if self.ref_model is None:
if not self.is_peft_model:
raise ValueError("No reference model and model is not a Peft model.")
else:
self.ref_model = prepare_deepspeed(
self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16
)
else:
if self.ref_model is None:
if not self.is_peft_model:
raise ValueError("No reference model and model is not a Peft model.")
else:
self.ref_model = self.ref_model.to(self.accelerator.device)
self.reward_model = self.reward_model.to(self.accelerator.device)
def get_train_dataloader(self) -> DataLoader:
return self.dataloader
def get_eval_dataloader(self) -> DataLoader:
return self.eval_dataloader
@contextmanager
def null_ref_context(self):
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
with (
self.accelerator.unwrap_model(self.model.policy).disable_adapter()
if self.is_peft_model and not self.ref_adapter_name
else nullcontext()
):
if self.ref_adapter_name:
self.model.policy.set_adapter(self.ref_adapter_name)
yield
if self.ref_adapter_name:
self.model.policy.set_adapter(self.model_adapter_name or "default")
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
backup_model = self.model
self.model = self.model.policy # save only the policy
if self.is_deepspeed_enabled:
backup_deepspeed = self.deepspeed
self.deepspeed = self.model
super().save_model(output_dir, _internal_call)
self.model = backup_model
if self.is_deepspeed_enabled:
self.deepspeed = backup_deepspeed
def train(self):
args = self.args
accelerator = self.accelerator
optimizer = self.optimizer
model = self.model
ref_policy = self.ref_model
reward_model = self.reward_model
processing_class = self.processing_class
dataloader = self.dataloader
device = accelerator.device
def repeat_generator():
while True:
yield from dataloader
iter_dataloader = iter(repeat_generator())
generation_config = GenerationConfig(
max_new_tokens=args.response_length,
temperature=(args.temperature + 1e-7),
top_k=0.0,
top_p=1.0,
do_sample=True,
)
accelerator.print("===training policy===")
start_time = time.time()
stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps)
approxkl_stats = torch.zeros(stats_shape, device=device)
pg_clipfrac_stats = torch.zeros(stats_shape, device=device)
pg_loss_stats = torch.zeros(stats_shape, device=device)
vf_loss_stats = torch.zeros(stats_shape, device=device)
vf_clipfrac_stats = torch.zeros(stats_shape, device=device)
entropy_stats = torch.zeros(stats_shape, device=device)
ratio_stats = torch.zeros(stats_shape, device=device)
model.train()
# trainer state initialization
self.state.global_step = 0
self.state.episode = 0
self.state.max_steps = args.num_total_batches
self.state.num_train_epochs = args.total_episodes / self.train_dataset_len
# Compute absolute values for logging, eval, and save if given as ratio
if args.logging_steps is not None:
if args.logging_steps < 1:
self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps)
else:
self.state.logging_steps = args.logging_steps
if args.eval_steps is not None:
if args.eval_steps < 1:
self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps)
else:
self.state.eval_steps = args.eval_steps
if args.save_steps is not None:
if args.save_steps < 1:
self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps)
else:
self.state.save_steps = args.save_steps
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# backward compatibility
if self.is_deepspeed_enabled:
self.deepspeed = self.model
self.model_wrapped = self.model
for update in range(1, args.num_total_batches + 1):
self.state.episode += 1 * args.batch_size
data = next(iter_dataloader)
with torch.no_grad():
queries = data["input_ids"].to(device)
context_length = queries.shape[1]
responses = []
postprocessed_responses = []
logprobs = []
ref_logprobs = []
scores = []
sequence_lengths = []
values = []
with unwrap_model_for_generation(
self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
) as unwrapped_model:
query_responses, logitss = batch_generation(
unwrapped_model.policy,
queries,
args.local_rollout_forward_batch_size,
processing_class.pad_token_id,
generation_config,
)
for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size):
query = queries[i : i + args.local_rollout_forward_batch_size]
query_response = query_responses[i : i + args.local_rollout_forward_batch_size]
response = query_response[:, context_length:]
logits = logitss[i : i + args.local_rollout_forward_batch_size]
logprob = selective_log_softmax(logits, response)
del logits
empty_cache()
if ref_policy is None:
with self.null_ref_context():
ref_output = forward(model.policy, query_response, processing_class.pad_token_id)
else:
ref_output = forward(ref_policy, query_response, processing_class.pad_token_id)
ref_logits = ref_output.logits[:, context_length - 1 : -1]
ref_logits /= args.temperature + 1e-7
ref_logprob = selective_log_softmax(ref_logits, response)
del ref_output, ref_logits
empty_cache()
# Response Processing 1. truncate response after the first occurrence of `stop_token_id`
postprocessed_response = response
if self.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0
postprocessed_response = truncate_response(
self.stop_token_id, processing_class.pad_token_id, response
)
# Response Processing 2. run reward model on the truncated responses
postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
sequence_length = first_true_indices(postprocessed_response == processing_class.pad_token_id) - 1
unwrapped_value_model = accelerator.unwrap_model(model).value_model
full_value, _, _ = get_reward(
unwrapped_value_model, query_response, processing_class.pad_token_id, context_length
)
value = full_value[:, context_length - 1 : -1].squeeze(-1)
_, score, _ = get_reward(
reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length
)
responses.append(response)
postprocessed_responses.append(postprocessed_response)
logprobs.append(logprob)
ref_logprobs.append(ref_logprob)
sequence_lengths.append(sequence_length)
scores.append(score)
values.append(value)
responses = torch.cat(responses, 0)
postprocessed_responses = torch.cat(postprocessed_responses, 0)
logprobs = torch.cat(logprobs, 0)
ref_logprobs = torch.cat(ref_logprobs, 0)
sequence_lengths = torch.cat(sequence_lengths, 0)
scores = torch.cat(scores, 0)
values = torch.cat(values, 0)
del (logprob, ref_logprob, full_value, value, score, unwrapped_model)
empty_cache()
gc.collect()
# Response Processing 3. Filter completion. Ensure that the sample contains stop_token_id
# Completions not passing that filter will receive a lower score.
contain_eos_token = torch.any(postprocessed_responses == self.processing_class.eos_token_id, dim=-1)
if self.args.missing_eos_penalty is not None:
scores[~contain_eos_token] -= self.args.missing_eos_penalty
# accelerator.print(f"{scores=}, {(contain_eos_token.sum() / len(contain_eos_token))=}")
# be very careful with `padding_mask_p1`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw
response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1)
padding_mask = response_idxs > sequence_lengths.unsqueeze(1)
logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB)
ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB)
sequence_lengths_p1 = sequence_lengths + 1
padding_mask_p1 = response_idxs > (sequence_lengths_p1.unsqueeze(1))
values = torch.masked_fill(values, padding_mask_p1, 0)
# 4. compute rewards
# Formula used by http://joschu.net/blog/kl-approx.html for the k1 and k3 estimators
logr = ref_logprobs - logprobs
kl = -logr if args.kl_estimator == "k1" else (logr.exp() - 1) - logr # Else statement is k3
non_score_reward = -args.kl_coef * kl
rewards = non_score_reward.clone()
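# the sequence-level score from the reward model is added at the end of each response (one step past the last non-padding token, clamped to stay in bounds)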
actual_start = torch.arange(rewards.size(0), device=rewards.device)
actual_end = torch.where(sequence_lengths_p1 < rewards.size(1), sequence_lengths_p1, sequence_lengths)
rewards[[actual_start, actual_end]] += scores
# 5. whiten rewards
if args.whiten_rewards:
rewards = masked_whiten(rewards, mask=~padding_mask_p1, shift_mean=False)
rewards = torch.masked_fill(rewards, padding_mask_p1, 0)
# 6. compute advantages and returns
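# Generalized Advantage Estimation (GAE): delta_t = r_t + gamma * V_{t+1} - V_t and
# A_t = delta_t + gamma * lam * A_{t+1}, accumulated backwards over the generated tokens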
lastgaelam = 0
advantages_reversed = []
gen_length = responses.shape[1]
for t in reversed(range(gen_length)):
nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
delta = rewards[:, t] + args.gamma * nextvalues - values[:, t]
lastgaelam = delta + args.gamma * args.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], axis=1)
returns = advantages + values
advantages = masked_whiten(advantages, ~padding_mask)
advantages = torch.masked_fill(advantages, padding_mask, 0)
empty_cache()
# Do multiple epochs of PPO training, with a fresh random shuffle in each epoch
for ppo_epoch_idx in range(args.num_ppo_epochs):
b_inds = np.random.permutation(args.local_batch_size)
minibatch_idx = 0
for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size):
mini_batch_end = mini_batch_start + args.local_mini_batch_size
mini_batch_inds = b_inds[mini_batch_start:mini_batch_end]
gradient_accumulation_idx = 0
for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size):
with accelerator.accumulate(model):
micro_batch_end = micro_batch_start + args.per_device_train_batch_size
micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end]
mb_advantage = advantages[micro_batch_inds]
mb_responses = responses[micro_batch_inds]
mb_query_responses = query_responses[micro_batch_inds]
mb_logprobs = logprobs[micro_batch_inds]
mb_return = returns[micro_batch_inds]
mb_values = values[micro_batch_inds]
output, vpred_temp = forward(model, mb_query_responses, processing_class.pad_token_id)
logits = output.logits[:, context_length - 1 : -1]
logits /= args.temperature + 1e-7
new_logprobs = selective_log_softmax(logits, mb_responses)
new_logprobs = torch.masked_fill(
new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB
)
vpred = vpred_temp[:, context_length - 1 : -1].squeeze(-1)
vpred = torch.masked_fill(vpred, padding_mask_p1[micro_batch_inds], 0)
vpredclipped = torch.clamp(
vpred,
mb_values - args.cliprange_value,
mb_values + args.cliprange_value,
)
vf_losses1 = torch.square(vpred - mb_return)
vf_losses2 = torch.square(vpredclipped - mb_return)
vf_loss_max = torch.max(vf_losses1, vf_losses2)
vf_loss = 0.5 * masked_mean(vf_loss_max, ~padding_mask_p1[micro_batch_inds])
vf_clipfrac = masked_mean(
(vf_losses2 > vf_losses1).float(), ~padding_mask_p1[micro_batch_inds]
)
logprobs_diff = new_logprobs - mb_logprobs
ratio = torch.exp(logprobs_diff)
pg_losses = -mb_advantage * ratio
pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange)
pg_loss_max = torch.max(pg_losses, pg_losses2)
pg_loss = masked_mean(pg_loss_max, ~padding_mask[micro_batch_inds])
loss = pg_loss + args.vf_coef * vf_loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
with torch.no_grad():
pg_clipfrac = masked_mean(
(pg_losses2 > pg_losses).float(), ~padding_mask[micro_batch_inds]
)
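# exact entropy of the token distribution via the identity H = logsumexp(z) - sum(softmax(z) * z)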
prob_dist = torch.nn.functional.softmax(logits, dim=-1)
entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1)
approxkl = 0.5 * (logprobs_diff**2).mean()
approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl
pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = (
pg_clipfrac
)
pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss
vf_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = vf_loss
vf_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = (
vf_clipfrac
)
entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean()
ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ratio.mean()
gradient_accumulation_idx += 1
minibatch_idx += 1
# del everything and empty cache
# fmt: off
del (
output, vpred_temp, logits, new_logprobs, vpred, vpredclipped,
vf_losses1, vf_losses2, vf_loss, vf_clipfrac, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss_max,
pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_return,
mb_advantage, mb_values, mb_responses, mb_query_responses, mb_logprobs,
)
# fmt: on
empty_cache()
with torch.no_grad():
mean_kl = kl.sum(1).mean()
mean_entropy = (-logprobs).sum(1).mean()
mean_non_score_reward = non_score_reward.sum(1).mean()
rlhf_reward = mean_non_score_reward + scores.mean()
eps = int(self.state.episode / (time.time() - start_time))
metrics = {}
metrics["eps"] = eps
metrics["objective/kl"] = self.accelerator.gather_for_metrics(mean_kl).mean().item()
metrics["objective/entropy"] = self.accelerator.gather_for_metrics(mean_entropy).mean().item()
metrics["objective/non_score_reward"] = (
self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item()
)
metrics["objective/rlhf_reward"] = self.accelerator.gather_for_metrics(rlhf_reward).mean().item()
metrics["objective/scores"] = self.accelerator.gather_for_metrics(scores.mean()).mean().item()
metrics["policy/approxkl_avg"] = self.accelerator.gather_for_metrics(approxkl_stats).mean().item()
metrics["policy/clipfrac_avg"] = self.accelerator.gather_for_metrics(pg_clipfrac_stats).mean().item()
metrics["loss/policy_avg"] = self.accelerator.gather_for_metrics(pg_loss_stats).mean().item()
metrics["loss/value_avg"] = self.accelerator.gather_for_metrics(vf_loss_stats).mean().item()
metrics["val/clipfrac_avg"] = self.accelerator.gather_for_metrics(vf_clipfrac_stats).mean().item()
metrics["policy/entropy_avg"] = self.accelerator.gather_for_metrics(entropy_stats).mean().item()
metrics["val/ratio"] = self.accelerator.gather_for_metrics(ratio_stats).mean().item()
metrics["val/ratio_var"] = self.accelerator.gather_for_metrics(ratio_stats).var().item()
metrics["val/num_eos_tokens"] = (responses == processing_class.eos_token_id).sum().item()
metrics["lr"] = self.lr_scheduler.get_last_lr()[0]
metrics["episode"] = self.state.episode
self.state.epoch = self.state.episode / self.train_dataset_len # used by self.log
self.state.global_step += 1
self.log(metrics)
self.lr_scheduler.step()
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
if self.control.should_save:
self._save_checkpoint(model, trial=None)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
del kl, mean_kl, mean_entropy, mean_non_score_reward, scores, metrics, non_score_reward
empty_cache()
gc.collect()
if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0:
self.generate_completions(sampling=True)
empty_cache()
del (
query_responses,
responses,
postprocessed_responses,
logprobs,
ref_logprobs,
values,
sequence_lengths,
contain_eos_token,
sequence_lengths_p1,
response_idxs,
padding_mask,
padding_mask_p1,
rewards,
actual_start,
actual_end,
advantages,
returns,
)
empty_cache()
# HF trainer specifics
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
if self.control.should_save:
self._save_checkpoint(model, trial=None, metrics=None)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def generate_completions(self, sampling: bool = False):
args = self.args
processing_class = self.processing_class
generation_config = GenerationConfig(
max_new_tokens=self.args.response_length,
temperature=(0.01 + 1e-7),
top_k=0.0,
top_p=1.0,
do_sample=True,
)
table = defaultdict(list)
with unwrap_model_for_generation(
self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
) as unwrapped_model:
for batch in self.eval_dataloader:
query = batch["input_ids"]
with torch.no_grad():
context_length = query.shape[1]
query_response, _ = batch_generation(
unwrapped_model.policy,
query,
query.shape[0],
processing_class.pad_token_id,
generation_config,
)
response = query_response[:, context_length:]
postprocessed_response = response
if self.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0
postprocessed_response = truncate_response(
self.stop_token_id, processing_class.pad_token_id, response
)
table["query"].extend(
gather_object(processing_class.batch_decode(query, skip_special_tokens=True))
)
table["model response"].extend(
gather_object(processing_class.batch_decode(postprocessed_response))
)
postprocessed_query_response = torch.cat((query, postprocessed_response), 1)
_, score, _ = get_reward(
self.reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length
)
table["score"].extend(self.accelerator.gather_for_metrics(score).float().cpu().numpy())
if sampling:
break
df = pd.DataFrame(table)
if self.accelerator.is_main_process:
if is_rich_available():
print_rich_table(df.iloc[0 : 0 + 5])
if "wandb" in args.report_to:
import wandb
if wandb.run is not None:
wandb.log({"completions": wandb.Table(dataframe=df)})
if "comet_ml" in args.report_to:
log_table_to_comet_experiment(
name="completions.csv",
table=df,
)
# Ensure the model card is saved along with the checkpoint
def _save_checkpoint(self, model, trial):
if self.args.hub_model_id is None:
model_name = Path(self.args.output_dir).name
else:
model_name = self.args.hub_model_id.split("/")[-1]
self.create_model_card(model_name=model_name)
super()._save_checkpoint(model, trial)
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
# normalize `tags` to a mutable set
if tags is None:
tags = set()
elif isinstance(tags, str):
tags = {tags}
else:
tags = set(tags)
if hasattr(self.model.config, "unsloth_version"):
tags.add("unsloth")
tags.update(self._tag_names)
# docstyle-ignore
citation = textwrap.dedent("""\
@article{mziegler2019fine-tuning,
title = {{Fine-Tuning Language Models from Human Preferences}},
author = {Daniel M. Ziegler and Nisan Stiennon and Jeffrey Wu and Tom B. Brown and Alec Radford and Dario Amodei and Paul F. Christiano and Geoffrey Irving},
year = 2019,
eprint = {arXiv:1909.08593}
}""")
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="PPO",
trainer_citation=citation,
paper_title="Fine-Tuning Language Models from Human Preferences",
paper_id="1909.08593",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/ppo_trainer.py/0 | {
"file_path": "trl/trl/trainer/ppo_trainer.py",
"repo_id": "trl",
"token_count": 19773
} | 647 |
# Introduction

Welcome to this first **Bonus Unit**, where you'll learn to **fine-tune a Large Language Model (LLM) for function calling**.
For LLMs, function calling is quickly becoming a *must-know* technique.
The idea is that, rather than relying only on the prompt-based approaches we used in Unit 1, function calling trains your model to **take actions and interpret observations during the training phase**, making your AI more robust.
> **When should I do this Bonus Unit?**
>
> This section is **optional** and is more advanced than Unit 1, so don't hesitate to either do this unit now or revisit it when your knowledge has improved thanks to this course.
>
> But don't worry, this Bonus Unit is designed to have all the information you need, so we'll walk you through every core concept of fine-tuning a model for function calling even if you haven't yet learned the inner workings of fine-tuning.
The best way to follow this Bonus Unit is to:
1. Know how to fine-tune an LLM with Transformers; if that's not the case, [check this](https://huggingface.co/learn/nlp-course/chapter3/1?fw=pt).
2. Know how to use `SFTTrainer` to fine-tune a model; to learn more about it, [check this documentation](https://huggingface.co/learn/nlp-course/en/chapter11/1).
---
## What You’ll Learn
1. **Function Calling**
How modern LLMs structure their conversations effectively, letting them trigger **Tools**.
2. **LoRA (Low-Rank Adaptation)**
A **lightweight and efficient** fine-tuning method that cuts down on computational and storage overhead. LoRA makes training large models *faster, cheaper, and easier* to deploy.
3. **The Thought → Act → Observe Cycle** in Function Calling models
A simple but powerful approach for structuring how your model decides when (and how) to call functions, track intermediate steps, and interpret the results from external Tools or APIs.
4. **New Special Tokens**
We’ll introduce **special markers** (illustrated in the sketch after this list) that help the model distinguish between:
- Internal “chain-of-thought” reasoning
- Outgoing function calls
- Responses coming back from external tools
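As a purely illustrative sketch (the exact marker names vary by model and are an assumption here, not the tokens this course settles on), a training sample for a weather question might be laid out like this:
```
<think>I need the current weather before I can answer.</think>
<tool_call>{"name": "get_weather", "arguments": {"location": "Paris"}}</tool_call>
<tool_response>{"temperature": 18, "condition": "sunny"}</tool_response>
It is currently 18°C and sunny in Paris.
```
Each marker pair delimits one of the three content types above, so the model can learn when it is reasoning, when it is calling a tool, and when it is reading a tool's result.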
---
By the end of this bonus unit, you’ll be able to:
- **Understand** the inner workings of APIs when it comes to Tools.
- **Fine-tune** a model using the LoRA technique.
- **Implement** and **modify** the Thought → Act → Observe cycle to create robust and maintainable Function-calling workflows.
- **Design and utilize** special tokens to seamlessly separate the model’s internal reasoning from its external actions.
And you'll **have fine-tuned your own model to do function calling.** 🔥
Let’s dive into **function calling**!
| agents-course/units/en/bonus-unit1/introduction.mdx/0 | {
"file_path": "agents-course/units/en/bonus-unit1/introduction.mdx",
"repo_id": "agents-course",
"token_count": 773
} | 0 |
# Table of Contents
You can access Unit 1 on hf.co/learn 👉 <a href="https://hf.co/learn/agents-course/unit1/introduction">here</a>
<!--
| Title | Description |
|-------|-------------|
| [Definition of an Agent](1_definition_of_an_agent.md) | General example of what agents can do without technical jargon. |
| [Explain LLMs](2_explain_llms.md) | Explanation of Large Language Models, including the family tree of models and suitable models for agents. |
| [Messages and Special Tokens](3_messages_and_special_tokens.md) | Explanation of messages, special tokens, and chat-template usage. |
| [Dummy Agent Library](4_dummy_agent_library.md) | Introduction to using a dummy agent library and serverless API. |
| [Tools](5_tools.md) | Overview of Pydantic for agent tools and other common tool formats. |
| [Agent Steps and Structure](6_agent_steps_and_structure.md) | Steps involved in an agent, including thoughts, actions, observations, and a comparison between code agents and JSON agents. |
| [Thoughts](7_thoughts.md) | Explanation of thoughts and the ReAct approach. |
| [Actions](8_actions.md) | Overview of actions and stop and parse approach. |
| [Observations](9_observations.md) | Explanation of observations and append result to reflect. |
| [Quizz](10_quizz.md) | Contains quizzes to test understanding of the concepts. |
| [Simple Use Case](11_simple_use_case.md) | Provides a simple use case exercise using datetime and a Python function as a tool. |
--> | agents-course/units/en/unit1/README.md/0 | {
"file_path": "agents-course/units/en/unit1/README.md",
"repo_id": "agents-course",
"token_count": 420
} | 1 |
# Introduction to Agentic Frameworks
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/thumbnail.jpg" alt="Thumbnail"/>
Welcome to this second unit, where **we'll explore different agentic frameworks** that can be used to build powerful agentic applications.
We will study:
- In Unit 2.1: [smolagents](https://huggingface.co/docs/smolagents/en/index)
- In Unit 2.2: [LlamaIndex](https://www.llamaindex.ai/)
- In Unit 2.3: [LangGraph](https://www.langchain.com/langgraph)
Let's dive in! 🕵
## When to Use an Agentic Framework
An agentic framework is **not always needed when building an application around LLMs**. Frameworks provide flexibility in the workflow to efficiently solve a specific task, but they're not always necessary.
Sometimes, **predefined workflows are sufficient** to fulfill user requests, and there is no real need for an agentic framework. If the approach to building an agent is simple, like a chain of prompts, plain code may be enough. The advantage is that the developer will have **full control and understanding of their system without abstractions**.
However, when the workflow becomes more complex, such as letting an LLM call functions or using multiple agents, these abstractions start to become helpful.
Considering these ideas, we can already identify the need for some features:
* An *LLM engine* that powers the system.
* A *list of tools* the agent can access.
* A *parser* for extracting tool calls from the LLM output.
* A *system prompt* synced with the parser.
* A *memory system*.
* *Error logging and retry mechanisms* to control LLM mistakes.
We'll explore how these topics are resolved in various frameworks, including `smolagents`, `LlamaIndex`, and `LangGraph`. A minimal sketch of what such a loop looks like in plain code follows below.
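To make these components concrete, here is a minimal, framework-free sketch of such a loop. Everything in it (the `llm` callable, the `TOOLS` registry, and the JSON tool-call convention) is a hypothetical placeholder, not the API of any of the frameworks below:
```python
import json
from typing import Callable

def get_weather(location: str) -> str:
    """A toy tool: return a fake weather report."""
    return f"The weather in {location} is sunny"

TOOLS: dict[str, Callable[..., str]] = {"get_weather": get_weather}  # list of tools

# System prompt kept in sync with the parser below
SYSTEM_PROMPT = (
    'To call a tool, reply with JSON: {"tool": "<name>", "args": {...}}. '
    "Available tools: " + ", ".join(TOOLS)
)

def parse_tool_call(text: str):
    """Parser: extract a tool call from the LLM output, or return None."""
    try:
        data = json.loads(text)
        return data["tool"], data.get("args", {})
    except (json.JSONDecodeError, KeyError, TypeError):
        return None

def run_agent(llm: Callable[[list], str], user_message: str, max_turns: int = 5) -> str:
    # Memory system: the growing list of messages
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": user_message},
    ]
    for _ in range(max_turns):  # step limit to contain LLM mistakes
        reply = llm(messages)  # the LLM engine powering the system
        call = parse_tool_call(reply)
        if call is None:
            return reply  # plain answer: we're done
        name, args = call
        observation = TOOLS[name](**args) if name in TOOLS else f"Unknown tool: {name}"
        messages.append({"role": "assistant", "content": reply})
        messages.append({"role": "user", "content": f"Observation: {observation}"})
    return "Stopped after reaching the step limit."
```
Real frameworks replace each of these placeholders with a battle-tested implementation, which is exactly the value they add once workflows grow complex.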
## Agentic Frameworks Units
| Framework | Description | Unit Author |
|------------|----------------|----------------|
| [smolagents](./smolagents/introduction) | Agents framework developed by Hugging Face. | Sergio Paniego - [HF](https://huggingface.co/sergiopaniego) - [X](https://x.com/sergiopaniego) - [LinkedIn](https://www.linkedin.com/in/sergio-paniego-blanco) |
| [LlamaIndex](./llama-index/introduction) | End-to-end tooling to ship a context-augmented AI agent to production. | David Berenstein - [HF](https://huggingface.co/davidberenstein1957) - [X](https://x.com/davidberenstei) - [LinkedIn](https://www.linkedin.com/in/davidberenstein) |
| [LangGraph](./langgraph/introduction) | Framework for stateful orchestration of agents. | Joffrey THOMAS - [HF](https://huggingface.co/Jofthomas) - [X](https://x.com/Jthmas404) - [LinkedIn](https://www.linkedin.com/in/joffrey-thomas) |
| agents-course/units/en/unit2/introduction.mdx/0 | {
"file_path": "agents-course/units/en/unit2/introduction.mdx",
"repo_id": "agents-course",
"token_count": 780
} | 2 |
# Using Tools in LlamaIndex
**Defining a clear set of Tools is crucial to performance.** As we discussed in [unit 1](../../unit1/tools), clear tool interfaces are easier for LLMs to use.
Much like a software API designed for human engineers, an LLM will get more out of a tool when it's easy to understand how it works.
There are **four main types of tools in LlamaIndex**:

1. `FunctionTool`: Convert any Python function into a tool that an agent can use. It automatically figures out how the function works.
2. `QueryEngineTool`: A tool that lets agents use query engines. Since agents are built on query engines, they can also use other agents as tools.
3. `Toolspecs`: Sets of tools created by the community, which often include tools for specific services like Gmail.
4. `Utility Tools`: Special tools that help handle large amounts of data from other tools.
We will go over each of them in more detail below.
## Creating a FunctionTool
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/llama-index/tools.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
A FunctionTool provides a simple way to wrap any Python function and make it available to an agent.
You can pass either a synchronous or asynchronous function to the tool, along with optional `name` and `description` parameters.
The name and description are particularly important as they help the agent understand when and how to use the tool effectively.
Let's look at how to create a FunctionTool below and then call it.
```python
from llama_index.core.tools import FunctionTool
def get_weather(location: str) -> str:
"""Useful for getting the weather for a given location."""
print(f"Getting weather for {location}")
return f"The weather in {location} is sunny"
tool = FunctionTool.from_defaults(
get_weather,
name="my_weather_tool",
description="Useful for getting the weather for a given location.",
)
tool.call("New York")
```
<Tip>When using an agent or LLM with function calling, the tool selected (and the arguments written for that tool) rely strongly on the tool name and description of the purpose and arguments of the tool. Learn more about function calling in the <a href="https://docs.llamaindex.ai/en/stable/examples/workflow/function_calling_agent/">Function Calling Guide</a>.</Tip>
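Since the tool above wraps a synchronous function, here is the asynchronous counterpart as a brief, hedged sketch. It assumes a recent LlamaIndex version where `FunctionTool.from_defaults` accepts an `async_fn` argument and tools expose an `acall` method; check your installed version's API before relying on it:

```python
import asyncio

from llama_index.core.tools import FunctionTool

async def aget_weather(location: str) -> str:
    """Useful for getting the weather for a given location."""
    return f"The weather in {location} is sunny"

# Assumption: from_defaults accepts async functions via the async_fn argument
tool = FunctionTool.from_defaults(
    async_fn=aget_weather,
    name="my_async_weather_tool",
    description="Useful for getting the weather for a given location.",
)

# Async tools are awaited with acall instead of call
print(asyncio.run(tool.acall("New York")))
```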
## Creating a QueryEngineTool
The `QueryEngine` we defined in the previous unit can be easily transformed into a tool using the `QueryEngineTool` class.
Let's see how to create a `QueryEngineTool` from a `QueryEngine` in the example below.
```python
import chromadb

from llama_index.core import VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
embed_model = HuggingFaceEmbedding("BAAI/bge-small-en-v1.5")
db = chromadb.PersistentClient(path="./alfred_chroma_db")
chroma_collection = db.get_or_create_collection("alfred")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
query_engine = index.as_query_engine(llm=llm)
tool = QueryEngineTool.from_defaults(query_engine, name="some useful name", description="some useful description")
```
## Creating Toolspecs
Think of `ToolSpecs` as collections of tools that work together harmoniously - like a well-organized professional toolkit.
Just as a mechanic's toolkit contains complementary tools that work together for vehicle repairs, a `ToolSpec` combines related tools for specific purposes.
For example, an accounting agent's `ToolSpec` might elegantly integrate spreadsheet capabilities, email functionality, and calculation tools to handle financial tasks with precision and efficiency.
<details>
<summary>Install the Google Toolspec</summary>
As introduced in the <a href="./llama-hub">section on the LlamaHub</a>, we can install the Google toolspec with the following command:
```bash
pip install llama-index-tools-google
```
</details>
And now we can load the toolspec and convert it to a list of tools.
```python
from llama_index.tools.google import GmailToolSpec
tool_spec = GmailToolSpec()
tool_spec_list = tool_spec.to_tool_list()
```
To get a more detailed view of the tools, we can take a look at the `metadata` of each tool.
```python
[(tool.metadata.name, tool.metadata.description) for tool in tool_spec_list]
```
### Model Context Protocol (MCP) in LlamaIndex
LlamaIndex also allows using MCP tools through a [ToolSpec on the LlamaHub](https://llamahub.ai/l/tools/llama-index-tools-mcp?from=).
You can simply run an MCP server and start using it through the following implementation.
If you want to dive deeper into MCP, you can check out our [free MCP Course](https://huggingface.co/learn/mcp-course/).
<details>
<summary>Install the MCP Toolspec</summary>
As introduced in the <a href="./llama-hub">section on the LlamaHub</a>, we can install the MCP toolspec with the following command:
```bash
pip install llama-index-tools-mcp
```
</details>
```python
from llama_index.core.workflow import Context
from llama_index.tools.mcp import BasicMCPClient, McpToolSpec

# We assume an MCP server is running on 127.0.0.1:8000; you can also point the client at your own MCP server.
mcp_client = BasicMCPClient("http://127.0.0.1:8000/sse")
mcp_tool = McpToolSpec(client=mcp_client)

# get the agent (get_agent is a helper defined in the accompanying course notebook)
agent = await get_agent(mcp_tool)

# create the agent context
agent_context = Context(agent)
```
## Utility Tools
Oftentimes, directly querying an API **can return an excessive amount of data**, some of which may be irrelevant, overflow the context window of the LLM, or unnecessarily increase the number of tokens that you are using.
Let's walk through our two main utility tools below.
1. `OnDemandToolLoader`: This tool turns any existing LlamaIndex data loader (BaseReader class) into a tool that an agent can use. The tool can be called with all the parameters needed to trigger `load_data` from the data loader, along with a natural language query string. During execution, we first load data from the data loader, index it (for instance with a vector store), and then query it 'on-demand'. All three of these steps happen in a single tool call.
2. `LoadAndSearchToolSpec`: The LoadAndSearchToolSpec takes in any existing Tool as input. As a tool spec, it implements `to_tool_list`, and when that function is called, two tools are returned: a loading tool and then a search tool. The load Tool execution would call the underlying Tool, and then index the output (by default with a vector index). The search Tool execution would take in a query string as input and call the underlying index.
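As a rough sketch of the second pattern (assuming the Wikipedia toolspec is installed via `pip install llama-index-tools-wikipedia`; exact import paths can vary across LlamaIndex versions):

```python
from llama_index.core.tools.tool_spec.load_and_search import LoadAndSearchToolSpec
from llama_index.tools.wikipedia import WikipediaToolSpec

# Take a tool that can return large payloads (here, Wikipedia search)...
wiki_tool = WikipediaToolSpec().to_tool_list()[1]

# ...and wrap it: to_tool_list() then returns a loading tool that calls the
# underlying tool and indexes its output, plus a search tool over that index.
load_tool, search_tool = LoadAndSearchToolSpec.from_defaults(wiki_tool).to_tool_list()
```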
<Tip>You can find toolspecs and utility tools on the <a href="https://llamahub.ai/">LlamaHub</a></Tip>
Now that we understand the basics of agents and tools in LlamaIndex, let's see how we can **use LlamaIndex to create configurable and manageable workflows!**
| agents-course/units/en/unit2/llama-index/tools.mdx/0 | {
"file_path": "agents-course/units/en/unit2/llama-index/tools.mdx",
"repo_id": "agents-course",
"token_count": 2037
} | 3 |
# Agentic Retrieval Augmented Generation (RAG)
In this unit, we'll be taking a look at how we can use Agentic RAG to help Alfred prepare for the amazing gala.
<Tip>We know we've already discussed Retrieval Augmented Generation (RAG) and agentic RAG in the previous unit, so feel free to skip ahead if you're already familiar with the concepts.</Tip>
LLMs are trained on enormous bodies of data to learn general knowledge.
However, an LLM's world knowledge may not always be relevant or up to date.
**RAG solves this problem by finding and retrieving relevant information from your data and forwarding that to the LLM.**

Now, think about how Alfred works:
1. We've asked Alfred to help plan a gala
2. Alfred needs to find the latest news and weather information
3. Alfred needs to structure and search the guest information
Just as Alfred needs to search through your household information to be helpful, any agent needs a way to find and understand relevant data.
**Agentic RAG is a powerful way to use agents to answer questions about your data.** We can pass various tools to Alfred to help him answer questions.
However, instead of answering the question on top of documents automatically, Alfred can decide to use any other tool or flow to answer the question.

Let's start **building our agentic RAG workflow!**
First, we'll create a RAG tool to retrieve up-to-date details about the invitees. Next, we'll develop tools for web search, weather updates, and Hugging Face Hub model download statistics. Finally, we'll integrate everything to bring our agentic RAG agent to life!
| agents-course/units/en/unit3/agentic-rag/agentic-rag.mdx/0 | {
"file_path": "agents-course/units/en/unit3/agentic-rag/agentic-rag.mdx",
"repo_id": "agents-course",
"token_count": 474
} | 4 |
# AI Agent Observability and Evaluation

Welcome to **Bonus Unit 2**! In this chapter, you'll explore advanced strategies for observing, evaluating, and ultimately improving the performance of your agents.
---
## 📚 When Should I Take This Bonus Unit?
This bonus unit is a great fit if you:
- **Develop and Deploy AI Agents:** You want to make sure your agents perform reliably in production.
- **Need Detailed Insights:** You're looking to diagnose issues, optimize performance, or understand your agent's inner workings.
- **Want to Reduce Operational Overhead:** By monitoring agent costs, latency, and execution details, you can manage resources efficiently.
- **Aim for Continuous Improvement:** You're interested in integrating both real-time user feedback and automated evaluation into your AI applications.
In short, it's for everyone who wants to put their agents in front of users!
---
## 🤓 What You'll Learn
In this unit, you'll learn how to:
- **Instrument Your Agent:** Integrate observability tools via OpenTelemetry with the *smolagents* framework.
- **Monitor Metrics:** Track performance indicators such as token usage (costs), latency, and error traces.
- **Evaluate in Real Time:** Understand techniques for live evaluation, including gathering user feedback and leveraging an LLM as a judge.
- **Analyze Offline:** Use benchmark datasets (e.g., GSM8K) to test and compare agent performance.
---
## 🚀 Ready to Get Started?
In the next section, you'll learn the fundamentals of Agent Observability and Evaluation. After that, it's time to see it in action! | agents-course/units/es/bonus-unit2/introduction.mdx/0 | {
"file_path": "agents-course/units/es/bonus-unit2/introduction.mdx",
"repo_id": "agents-course",
"token_count": 729
} | 5 |
# Understanding AI Agents through the Thought-Action-Observation Cycle
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-3.jpg" alt="Unit 1 planning"/>
In the previous sections, we learned:
- **How tools are made available to the agent in the system prompt**.
- **How AI agents are systems that can 'reason', plan, and interact with their environment**.
In this section, **we'll explore the complete AI Agent Workflow**, a cycle we defined as Thought-Action-Observation.
Then, we'll dive deeper into each of these steps.
## The Core Components
Agents work in a continuous cycle of: **thinking (Thought) → acting (Action) and observing (Observation)**.
Let's break these actions down together:
1. **Thought**: The LLM part of the agent decides what the next step should be.
2. **Action:** The agent takes an action by calling the tools with the associated arguments.
3. **Observation:** The model reflects on the response from the tool.
## The Thought-Action-Observation Cycle
The three components work together in a continuous loop. To use a programming analogy, the agent uses a **while loop**: the loop continues until the agent's objective has been fulfilled.
Visually, it looks like this:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AgentCycle.gif" alt="Think, Act, Observe cycle"/>
In many agent frameworks, **the rules and guidelines are embedded directly into the system prompt**, ensuring that every cycle adheres to a defined logic.
In a simplified version, our system prompt may look like this:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/system_prompt_cycle.png" alt="Think, Act, Observe cycle"/>
We see here that in the System Message we defined:
- The *agent's behavior*.
- The *tools our agent has access to*, as we described in the previous section.
- The *Thought-Action-Observation cycle*, which we bake into the LLM's instructions (see the sketch just below).
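To make the while-loop analogy concrete, here is a minimal, self-contained sketch of the cycle. The `think` and `act` functions below are hypothetical stand-ins for the LLM call and tool execution, not a real framework API:

```python
from dataclasses import dataclass

@dataclass
class Thought:
    is_final_answer: bool
    content: str  # either the final answer or the tool call to make

def think(memory: list) -> Thought:
    # Stand-in for the LLM: finish once an observation has arrived
    if any(m.startswith("Observation:") for m in memory):
        return Thought(True, "The weather in New York is sunny.")
    return Thought(False, "get_weather(location='New York')")

def act(tool_call: str) -> str:
    # Stand-in for real tool execution (e.g., a weather API)
    return "sunny"

def run_agent(task: str) -> str:
    memory = [task]
    while True:                                   # the agent's while loop
        thought = think(memory)                   # Thought
        if thought.is_final_answer:
            return thought.content                # objective met: exit loop
        result = act(thought.content)             # Action
        memory.append(f"Observation: {result}")   # Observation

print(run_agent("What's the weather like in New York today?"))
```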
Let's take a small example to understand the process before diving deeper into each of its steps.
## Alfred, the Weather Agent
We created Alfred, the Weather Agent.
A user asks Alfred: "What's the weather like in New York today?"
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent.jpg" alt="Alfred Agent"/>
Alfred's job is to answer this query using a weather API tool.
Here's how the cycle unfolds:
### Thought
**Internal Reasoning:**
Upon receiving the query, Alfred's internal dialogue might be:
*"The user needs current weather information for New York. I have access to a tool that fetches weather data. First, I need to call the weather API to get up-to-date details."*
This step shows the agent breaking the problem down into steps: first, gathering the necessary data.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-1.jpg" alt="Alfred Agent"/>
### Action
**Tool Usage:**
Based on its reasoning and the fact that Alfred knows about a `get_weather` tool, Alfred prepares a JSON-formatted command that calls the weather API tool. For example, its first action could be:
Thought: I need to check the current weather for New York.
```
{
  "action": "get_weather",
  "action_input": {
    "location": "New York"
  }
}
```
Here, the action clearly specifies which tool to call (e.g., get_weather) and what parameter to pass (the "location": "New York").
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-2.jpg" alt="Alfred Agent"/>
### Observation
**Feedback from the Environment:**
After the tool call, Alfred receives an observation. This might be the raw weather data from the API, such as:
*"Current weather in New York: partly cloudy, 15°C, 60% humidity."*
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-3.jpg" alt="Alfred Agent"/>
This observation is then added to the prompt as additional context. It functions as real-world feedback, confirming whether the action succeeded and providing the needed details.
### Updated Thought
**Reflecting:**
With the observation in hand, Alfred updates its internal reasoning:
*"Now that I have the weather data for New York, I can compile an answer for the user."*
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-4.jpg" alt="Alfred Agent"/>
### Final Action
Alfred then generates a final response formatted as we told it to:
Thought: I have the weather data now. The current weather in New York is partly cloudy with a temperature of 15°C and 60% humidity.
Final answer: The current weather in New York is partly cloudy with a temperature of 15°C and 60% humidity.
This final action sends the answer back to the user, closing the loop.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-5.jpg" alt="Alfred Agent"/>
What we see in this example:
- **Agents iterate through a loop until the objective is fulfilled:**
**Alfred's process is cyclical**. It starts with a thought, then acts by calling a tool, and finally observes the outcome. If the observation had indicated an error or incomplete data, Alfred could have re-entered the cycle to correct its approach.
- **Tool Integration:**
The ability to call a tool (like a weather API) enables Alfred to **go beyond static knowledge and retrieve real-time data**, an essential aspect of many AI agents.
- **Dynamic Adaptation:**
Each cycle lets the agent incorporate fresh information (observations) into its reasoning (thought), ensuring that the final answer is well-informed and accurate.
This example illustrates the core concept behind the *ReAct cycle* (a concept we'll develop in the next section): **the interplay of Thought, Action, and Observation empowers AI agents to solve complex tasks iteratively**.
By understanding and applying these principles, you can design agents that not only reason about their tasks but also **use external tools effectively to complete them**, all while continuously refining their output based on environmental feedback.
---
Now let's dive deeper into Thought, Action, and Observation as the individual steps of the process.
| agents-course/units/es/unit1/agent-steps-and-structure.mdx/0 | {
"file_path": "agents-course/units/es/unit1/agent-steps-and-structure.mdx",
"repo_id": "agents-course",
"token_count": 2625
} | 6 |
# Conclusion
Congratulations on finishing the `LangGraph` module of this second unit! 🥳
You've now mastered the fundamentals of building structured workflows with LangGraph that you can take all the way to production.
This module is just the beginning of your LangGraph journey. For more advanced topics, we recommend:
- Exploring the [official LangGraph documentation](https://github.com/langchain-ai/langgraph)
- Taking the full [Introduction to LangGraph](https://academy.langchain.com/courses/intro-to-langgraph) course from LangChain Academy
- Building something yourself!
In the next unit, you'll explore real use cases. It's time to leave theory behind and get into real action!
We would love **to hear your feedback on the course and your suggestions for improving it**. If you have any, please 👉 [fill out this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog)
### Keep Learning, Stay Awesome! 🤗
Dear Sir/Madam! 🎩🦇
-Alfred- | agents-course/units/es/unit2/langgraph/conclusion.mdx/0 | {
"file_path": "agents-course/units/es/unit2/langgraph/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 420
} | 7 |
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/code_agents.ipynb"},
]} />
# Building Agents That Use Code
Code agents are the default agent type in `smolagents`. They generate Python tool calls to perform actions, achieving action representations that are efficient, expressive, and accurate.
Their streamlined approach reduces the number of required actions, simplifies complex operations, and enables the reuse of existing code functions. `smolagents` provides a lightweight framework for building code agents, implemented in approximately 1,000 lines of code.

Graphic from the paper [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030)
<Tip>
If you want to learn more about why code agents are effective, check out <a href="https://huggingface.co/docs/smolagents/en/conceptual_guides/intro_agents#code-agents" target="_blank">this guide</a> from the smolagents documentation.
</Tip>
## Why Code Agents?
In a multi-step agent process, the LLM writes and executes actions, typically involving external tool calls. Traditional approaches use a JSON format to specify tool names and arguments as strings, **which the system must parse to determine which tool to execute**.
However, research shows that **tool-calling LLMs work more effectively with code directly**. This is a core principle of `smolagents`, as shown in the diagram above from the paper [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030).
Writing actions in code rather than JSON offers several key advantages:
* **Composability**: Easily combine and reuse actions
* **Object Management**: Work directly with complex structures like images
* **Generality**: Express any computationally possible task
* **Natural for LLMs**: High-quality code is already present in LLM training data
## How Does a Code Agent Work?

The diagram above illustrates how `CodeAgent.run()` works, following the ReAct framework we mentioned in Unit 1. The main abstraction for agents in `smolagents` is a `MultiStepAgent`, which serves as the central building block. `CodeAgent` is a special kind of `MultiStepAgent`, as we'll see in an example below.
A `CodeAgent` performs actions through a cycle of steps, with existing variables and knowledge incorporated into the agent's context, which is kept in an execution log:
1. The system prompt is stored in a `SystemPromptStep`, and the user query is logged in a `TaskStep`.
2. Then, the following while loop is executed:
2.1 The method `agent.write_memory_to_messages()` writes the agent's logs into a list of LLM-readable [chat messages](https://huggingface.co/docs/transformers/en/chat_templating).
2.2 These messages are sent to a `Model`, which generates a completion.
2.3 The completion is parsed to extract the action, which, in our case, should be a code snippet since we're working with a `CodeAgent`.
2.4 The action is executed.
2.5 The results are logged into memory in an `ActionStep`.
At the end of each step, if the agent includes any function calls (in `agent.step_callback`), they are executed.
## Let's See Some Examples
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/code_agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
Alfred is planning a party at the Wayne family mansion and needs your help to make sure everything goes smoothly. To help him, we'll apply what we've learned about how a multi-step `CodeAgent` operates.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-party.jpg" alt="Alfred's party"/>
If you haven't installed `smolagents` yet, you can do so by running the following command:
```bash
pip install smolagents -U
```
Let's also log in to the Hugging Face Hub to have access to the Serverless Inference API.
```python
from huggingface_hub import login
login()
```
### Selecting a Playlist for the Party Using `smolagents`
Music is an essential part of a successful party! Alfred needs some help selecting the playlist. Luckily, `smolagents` has got us covered! We can build an agent capable of searching the web using DuckDuckGo. To give the agent access to this tool, we include it in the tool list when creating the agent.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-playlist.jpg" alt="Alfred's playlist"/>
For the model, we'll rely on `InferenceClientModel`, which provides access to Hugging Face's [Serverless Inference API](https://huggingface.co/docs/api-inference/index). The default model is `"Qwen/Qwen2.5-Coder-32B-Instruct"`, which is performant and available for fast inference, but you can select any compatible model from the Hub.
Running an agent is quite straightforward:
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=InferenceClientModel())
agent.run("Busca las mejores recomendaciones de música para una fiesta en la mansión de los Wayne.")
```
When you run this example, the output will **display a trace of the workflow steps being executed**. It will also print the corresponding Python code with the message:
```python
 ─ Executing parsed code: ────────────────────────────────────────────────────────────────────────────────────────
 results = web_search(query="best music for a Batman party")
 print(results)
 ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────
```
After a few steps, you'll see the generated playlist that Alfred can use for the party! 🎵
### Using a Custom Tool to Prepare the Menu
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/alfred-menu.jpg" alt="Alfred's menu"/>
Now that we've selected a playlist, we need to organize the menu for the guests. Once again, Alfred can take advantage of `smolagents` to do so. Here, we use the `@tool` decorator to define a custom function that acts as a tool. We'll cover tool creation in more detail later, so for now, we can simply run the code.
As you can see in the example below, we'll create a tool using the `@tool` decorator and include it in the `tools` list.
```python
from smolagents import CodeAgent, tool, InferenceClientModel
# Tool to suggest a menu based on the occasion
@tool
def suggest_menu(occasion: str) -> str:
    """
    Suggests a menu based on the occasion.
    Args:
        occasion: The type of occasion for the party.
    """
    if occasion == "casual":
        return "Pizza, snacks, and drinks."
    elif occasion == "formal":
        return "3-course dinner with wine and dessert."
    elif occasion == "superhero":
        return "Buffet with high-energy and healthy food."
    else:
        return "Custom menu for the butler."

# Alfred, the butler, preparing the menu for the party
agent = CodeAgent(tools=[suggest_menu], model=InferenceClientModel())

# Preparing the menu for the party
agent.run("Prepare a formal menu for the party.")
```
The agent will run for a few steps until it finds the answer.
The menu is ready! 🥗
### Using Python Imports Inside the Agent
We have the playlist and menu ready, but we need to check one more crucial detail: preparation time!
Alfred needs to calculate when everything would be ready if he started preparing now, in case they need assistance from other superheroes.
`smolagents` specializes in agents that write and execute Python code snippets, offering sandboxed execution for security.
**Code execution has strict security measures** - imports outside a predefined safe list are blocked by default. However, you can authorize additional imports by passing them as strings in `additional_authorized_imports`.
For more details on secure code execution, see the official [guide](https://huggingface.co/docs/smolagents/tutorials/secure_code_execution).
When creating the agent, we'll use `additional_authorized_imports` to allow importing the `datetime` module.
```python
from smolagents import CodeAgent, InferenceClientModel
import numpy as np
import time
import datetime
agent = CodeAgent(tools=[], model=InferenceClientModel(), additional_authorized_imports=['datetime'])
agent.run(
    """
    Alfred needs to prepare for the party. Here are the tasks:
    1. Prepare the drinks - 30 minutes
    2. Decorate the mansion - 60 minutes
    3. Set up the menu - 45 minutes
    4. Prepare the music and playlist - 45 minutes

    If we start right now, at what time will the party be ready?
    """
)
```
These examples are just the beginning of what you can do with code agents, and we're already starting to see their usefulness for preparing the party.
You can learn more about how to build code agents in the [smolagents documentation](https://huggingface.co/docs/smolagents).
In summary, `smolagents` specializes in agents that write and execute Python code snippets, offering sandboxed execution for security. It supports both local and API-based language models, making it adaptable to various development environments.
### Sharing Our Custom Party Preparator Agent to the Hub
Wouldn't it be **amazing to share our very own Alfred agent with the community**? By doing so, anyone can easily download and use the agent directly from the Hub, bringing Gotham's ultimate party planner to their fingertips! Let's make it happen! 🎉
The `smolagents` library makes this possible by letting you share a complete agent with the community and download others for immediate use. It's as simple as the following:
```python
# Change to your username and repo name
agent.push_to_hub('sergiopaniego/AlfredAgent')
```
To download the agent again, use the code below:
```python
# Change to your username and repo name
alfred_agent = agent.from_hub('sergiopaniego/AlfredAgent')
alfred_agent.run("Dame la mejor lista de reproducción para una fiesta en la mansión de Wayne. La idea de la fiesta es un tema de 'mascarada de villanos'")
```
What's also exciting is that shared agents are directly available as Hugging Face Spaces, allowing you to interact with them in real time. You can explore other agents [here](https://huggingface.co/spaces/davidberenstein1957/smolagents-and-tools).
For example, the _AlfredAgent_ is available [here](https://huggingface.co/spaces/sergiopaniego/AlfredAgent). You can try it out directly below:
<iframe
src="https://sergiopaniego-alfredagent.hf.space/"
frameborder="0"
width="850"
height="450"
></iframe>
You may be wondering: how did Alfred build such an agent using `smolagents`? By integrating several tools, he can generate an agent as follows. Don't worry about the tools for now, as we have a dedicated section later in this unit to explore that in detail:
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, InferenceClientModel, Tool, tool, VisitWebpageTool
@tool
def suggest_menu(occasion: str) -> str:
    """
    Suggests a menu based on the occasion.
    Args:
        occasion: The type of occasion for the party.
    """
    if occasion == "casual":
        return "Pizza, snacks, and drinks."
    elif occasion == "formal":
        return "3-course dinner with wine and dessert."
    elif occasion == "superhero":
        return "Buffet with high-energy and healthy food."
    else:
        return "Custom menu for the butler."

@tool
def catering_service_tool(query: str) -> str:
    """
    This tool returns the highest-rated catering service in Gotham City.
    Args:
        query: A search term for finding catering services.
    """
    # Example list of catering services and their ratings
    services = {
        "Gotham Catering Co.": 4.9,
        "Wayne Manor Catering": 4.8,
        "Gotham City Events": 4.7,
    }
    # Find the highest-rated catering service (simulating search query filtering)
    best_service = max(services, key=services.get)
    return best_service

class SuperheroPartyThemeTool(Tool):
    name = "superhero_party_theme_generator"
    description = """
    This tool suggests creative superhero-themed party ideas based on a category.
    It returns a unique party theme idea."""
    inputs = {
        "category": {
            "type": "string",
            "description": "The type of superhero party (e.g., 'classic heroes', 'villain masquerade', 'futuristic Gotham').",
        }
    }
    output_type = "string"

    def forward(self, category: str):
        themes = {
            "classic heroes": "Justice League Gala: Guests come dressed as their favorite DC heroes with themed cocktails like 'The Kryptonite Punch'.",
            "villain masquerade": "Gotham Rogues' Ball: A mysterious masquerade where guests dress as classic Batman villains.",
            "futuristic Gotham": "Neo-Gotham Night: A cyberpunk-style party inspired by Batman Beyond, with neon decorations and futuristic gadgets."
        }
        return themes.get(category.lower(), "Themed party idea not found. Try 'classic heroes', 'villain masquerade', or 'futuristic Gotham'.")

# Alfred, the butler, preparing the menu for the party
agent = CodeAgent(
    tools=[
        DuckDuckGoSearchTool(),
        VisitWebpageTool(),
        suggest_menu,
        catering_service_tool,
        SuperheroPartyThemeTool()
    ],
    model=InferenceClientModel(),
    max_steps=10,
    verbosity_level=2
)

agent.run("Give me the best playlist for a party at Wayne's mansion. The party idea is a 'villain masquerade' theme")
```
As you can see, we've created a `CodeAgent` with several tools that enhance the agent's functionality, turning it into the ultimate party planner ready to share with the community! 🎉
Now, it's your turn: build your own agent and share it with the community using the knowledge we've just learned! 🕵️‍♂️💡
<Tip>
If you would like to share your agent project, then make a Space and tag [agents-course](https://huggingface.co/agents-course) on the Hugging Face Hub. We'd love to see what you've created!
</Tip>
### Inspecting Our Party Preparator Agent with OpenTelemetry and Langfuse 📡
As Alfred fine-tunes the Party Preparator Agent, he's growing weary of debugging its runs. Agents, by nature, are unpredictable and difficult to inspect. But since his goal is to build the ultimate Party Preparator Agent and deploy it in production, he needs robust traceability for future monitoring and analysis.
Once again, `smolagents` comes to the rescue! It adopts the [OpenTelemetry](https://opentelemetry.io/) standard for instrumenting agent runs, allowing seamless inspection and logging. With the help of [Langfuse](https://langfuse.com/) and the `SmolagentsInstrumentor`, Alfred can easily track and analyze his agent's behavior.
Setting it up is straightforward!
First, we need to install the necessary dependencies:
```bash
pip install opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-smolagents
```
Next, Alfred has already created an account on Langfuse and has his API keys ready. If you haven't done so yet, you can sign up for Langfuse Cloud [here](https://cloud.langfuse.com/) or explore [alternatives](https://huggingface.co/docs/smolagents/tutorials/inspect_runs).
Once you have your API keys, they need to be properly configured as follows:
```python
import os
import base64
LANGFUSE_PUBLIC_KEY="pk-lf-..."
LANGFUSE_SECRET_KEY="sk-lf-..."
LANGFUSE_AUTH=base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # Región de datos EU
# os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://us.cloud.langfuse.com/api/public/otel" # Región de datos US
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"
```
Finally, Alfred is ready to initialize the `SmolagentsInstrumentor` and start tracking his agent's performance.
```python
from opentelemetry.sdk.trace import TracerProvider
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider)
```
Alfred is now connected 🔌! The runs from `smolagents` are being logged in Langfuse, giving him full visibility into the agent's behavior. With this setup, he's ready to revisit previous runs and refine his Party Preparator Agent even further.
```python
from smolagents import CodeAgent, InferenceClientModel
agent = CodeAgent(tools=[], model=InferenceClientModel())
alfred_agent = agent.from_hub('sergiopaniego/AlfredAgent', trust_remote_code=True)
alfred_agent.run("Dame la mejor lista de reproducción para una fiesta en la mansión de Wayne. La idea de la fiesta es un tema de 'mascarada de villanos'")
```
Alfred can now access these logs [here](https://cloud.langfuse.com/project/cm7bq0abj025rad078ak3luwi/traces/995fc019255528e4f48cf6770b0ce27b?timestamp=2025-02-19T10%3A28%3A36.929Z) to review and analyze them.
Meanwhile, the [suggested playlist](https://open.spotify.com/playlist/0gZMMHjuxMrrybQ7wTMTpw) sets the perfect vibe for the party preparations. Cool, right? 🎶
---
Now that we've created our first Code Agent, **let's learn how we can create Tool Calling Agents**, the second type of agent available in `smolagents`.
## Resources
- [smolagents Blog](https://huggingface.co/blog/smolagents) - Introduction to smolagents and code interactions
- [smolagents: Building Good Agents](https://huggingface.co/docs/smolagents/tutorials/building_good_agents) - Best practices for reliable agents
- [Building Effective Agents - Anthropic](https://www.anthropic.com/research/building-effective-agents) - Agent design principles
- [Sharing runs with OpenTelemetry](https://huggingface.co/docs/smolagents/tutorials/inspect_runs) - Details on how to set up OpenTelemetry for tracking your agents. | agents-course/units/es/unit2/smolagents/code_agents.mdx/0 | {
"file_path": "agents-course/units/es/unit2/smolagents/code_agents.mdx",
"repo_id": "agents-course",
"token_count": 7738
} | 8 |
# Introduction to the Agentic RAG Use Case

In this unit, we'll help Alfred, our friendly agent who is hosting the gala, by using Agentic RAG to create a tool that can be used to answer questions about the guests at the gala.
<Tip>
This is a 'real-world' use case for Agentic RAG that you could use in your own projects or workplaces. If you want to get more out of this project, why not try it on your own use case and share it on Discord?
</Tip>
You can choose any of the frameworks discussed in the course for this use case. We provide code examples for each in separate tabs.
## A Gala to Remember
Now, it's time to get our hands dirty with an actual use case. Let's set the stage!
**You've decided to host the most extravagant and opulent party of the century.** This means lavish feasts, enchanting dancers, renowned DJs, exquisite drinks, a breathtaking fireworks display, and much more.
Alfred, your friendly neighbourhood agent, is getting ready to watch over all of your needs for this party, and **Alfred is going to manage everything himself**. To do so, he needs access to all of the information about the party, including the menu, the guests, the schedule, weather forecasts, and much more!
Not only that, but he also needs to make sure that the party is going to be a success, so **he needs to be able to answer any questions about the party during the party**, while handling unexpected situations that may arise.
He can't do this alone, so we need to make sure that Alfred has access to all of the information and tools he needs.
First, let's give him a list of hard requirements for the gala.
## The Gala Requirements
A properly educated person in the age of the **Renaissance** needed three main traits.
He or she needed profound **knowledge of sports, culture, and science**. So, we need to make sure we can impress our guests with our knowledge and provide them with a truly unforgettable gala.
However, to avoid any conflicts, there are some **topics, like politics and religion, that are to be avoided at a gala.** It needs to be a fun party without conflicts related to beliefs and ideals.
According to etiquette, **a good host should be aware of the guests' backgrounds**, including their interests and endeavours. A good host also gossips and shares stories about the guests with one another.
Lastly, we need to make sure we have **some general knowledge about the weather** so we can continuously find a real-time update to ensure the perfect timing to launch the fireworks and end the gala with a bang! 🎆
As you can see, Alfred needs a lot of information to host the gala.
Luckily, we can help and prepare Alfred by giving him some **Retrieval Augmented Generation (RAG) training**!
Let's start by creating the tools that Alfred needs to be able to host the gala!
| agents-course/units/es/unit3/agentic-rag/introduction.mdx/0 | {
"file_path": "agents-course/units/es/unit3/agentic-rag/introduction.mdx",
"repo_id": "agents-course",
"token_count": 1191
} | 9 |
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https%3A//huggingface.co/agents-course/notebooks/blob/main/fr/bonus-unit2/monitoring-and-evaluating-agents.ipynb"},
]} />
# Observing and Evaluating Agents
<Tip>
You can follow the code in <a href="https://colab.research.google.com/#fileId=https%3A//huggingface.co/agents-course/notebooks/blob/main/fr/bonus-unit2/monitoring-and-evaluating-agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
In this notebook, we'll learn how to **monitor the internal steps (traces) of our agent** and **evaluate its performance** using open-source observability tools.
The ability to observe and evaluate an agent's behavior is essential for:
- Debugging issues when tasks fail or produce suboptimal results
- Monitoring costs and performance in real time
- Improving reliability and safety through continuous feedback
## Exercise Prerequisites 🏗️
Before running this notebook, make sure you have:
🔲 📚 **Studied the [Introduction to Agents](https://huggingface.co/learn/agents-course/fr/unit1/introduction) section**
🔲 📚 **Studied the [smolagents framework](https://huggingface.co/learn/agents-course/fr/unit2/smolagents/introduction) section**
## Step 0: Install the Required Libraries
We will need a few libraries that allow us to run, monitor, and evaluate our agents:
```python
%pip install langfuse 'smolagents[telemetry]' openinference-instrumentation-smolagents datasets 'smolagents[gradio]' gradio --upgrade
```
## Step 1: Instrument Your Agent
In this notebook, we'll use [Langfuse](https://langfuse.com/) as our observability tool, but you can use **any other OpenTelemetry-compatible service**. The code below shows how to set the environment variables for Langfuse (or any OTel endpoint) and how to instrument your smolagent.
**Note:** If you are using LlamaIndex or LangGraph, you can find documentation on instrumenting them [here](https://langfuse.com/docs/integrations/llama-index/workflows) and [here](https://langfuse.com/docs/integrations/langchain/example-python-langgraph).
First, let's configure the Langfuse credentials as environment variables. Get your Langfuse API keys by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting Langfuse](https://langfuse.com/self-hosting).
```python
import os
# Get keys for your project from the project settings page: https://cloud.langfuse.com
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 région EU
# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 région US
```
We also need to configure our Hugging Face token for inference calls.
```python
# Set your Hugging Face token/secrets as an environment variable
os.environ["HF_TOKEN"] = "hf_..."
```
With the environment variables set, we can now initialize the Langfuse client. `get_client()` initializes the Langfuse client using the credentials provided in the environment variables.
```python
from langfuse import get_client
langfuse = get_client()
# Verify connection
if langfuse.auth_check():
    print("Langfuse client is authenticated and ready!")
else:
    print("Authentication failed. Please check your credentials and host.")
```
Next, we can set up the `SmolagentsInstrumentor()` to instrument our smolagent and send traces to Langfuse.
```python
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
SmolagentsInstrumentor().instrument()
```
## Step 2: Test Your Instrumentation
Here is a simple smolagents CodeAgent that computes `1+1`. We run it to confirm that the instrumentation works correctly. If everything is set up properly, you will see logs/spans in your observability dashboard.
```python
from smolagents import InferenceClientModel, CodeAgent
# Create a basic agent to test instrumentation
agent = CodeAgent(
tools=[],
model=InferenceClientModel()
)
agent.run("1+1=")
```
Check your [Langfuse Traces Dashboard](https://cloud.langfuse.com/traces) (or the observability tool of your choice) to confirm that the spans and logs have been recorded.
Example screenshot from Langfuse:

_[Link to the trace](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1b94d6888258e0998329cdb72a371155?timestamp=2025-03-10T11%3A59%3A41.743Z)_
## Step 3: Observe and Evaluate a More Complex Agent
Now that you've confirmed your instrumentation works, let's try a more complex query to see how advanced metrics (token usage, latency, costs, etc.) are tracked.
```python
from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)
search_tool = DuckDuckGoSearchTool()
agent = CodeAgent(tools=[search_tool], model=InferenceClientModel())
agent.run("Combien de Rubik's Cubes pourrait-on faire tenir dans la Cathédrale Notre-Dame ?")
```
### Trace Structure
Most observability tools record a **trace** that contains **spans**, which represent each step of your agent's logic. Here, the trace contains the overall agent run and sub-spans for:
- The tool calls (DuckDuckGoSearchTool)
- The LLM calls (InferenceClientModel)
You can inspect these to see precisely where time is spent, how many tokens are used, and so on:

_[Link to the trace](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_
## Online Evaluation
In the previous section, we learned about the difference between online and offline evaluation. Now, we will see how to monitor your agent in production and evaluate it live.
### Common Metrics to Track in Production
1. **Costs** - The smolagents instrumentation captures token usage, which you can convert into approximate costs by assigning a price per token.
2. **Latency** - Observe how long each step, or the entire run, takes.
3. **User Feedback** - Users can provide direct feedback (thumbs up/down) to help refine or correct the agent.
4. **LLM-as-a-Judge** - Use a separate LLM to evaluate your agent's output in near real time (e.g., checking for toxicity or correctness).
Below, we show examples of these metrics.
#### 1. Costs
Below is a screenshot showing usage for `Qwen2.5-Coder-32B-Instruct` calls. This is useful for spotting costly steps and optimizing your agent.

_[Link to the trace](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_
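If your observability tool exposes token counts, converting them into an approximate cost is a simple multiplication. A hedged sketch (the per-token prices are made-up placeholders; real token counts would come from your traces):

```python
# Hypothetical per-token prices in USD (placeholders, not real pricing)
PRICE_PER_INPUT_TOKEN = 0.0000008
PRICE_PER_OUTPUT_TOKEN = 0.0000024

def estimate_cost(input_tokens: int, output_tokens: int) -> float:
    """Convert token usage captured in a trace into an approximate cost."""
    return input_tokens * PRICE_PER_INPUT_TOKEN + output_tokens * PRICE_PER_OUTPUT_TOKEN

# e.g. a step that consumed 2,500 input tokens and 700 output tokens
print(f"~${estimate_cost(2500, 700):.6f}")
```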
#### 2. Latency
We can also see how long each step took. In the example below, the entire conversation took 32 seconds, which you can break down by step. This helps you identify bottlenecks and optimize your agent.

_[Link to the trace](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/1ac33b89ffd5e75d4265b62900c348ed?timestamp=2025-03-07T13%3A45%3A09.149Z&display=preview)_
#### 3. Additional Attributes
You can also pass additional attributes to your spans. These can include `user_id`, `tags`, `session_id`, and custom metadata. Enriching traces with these details is important for analysis, debugging, and monitoring your application's behavior across different users or sessions.
```python
from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)
search_tool = DuckDuckGoSearchTool()
agent = CodeAgent(
tools=[search_tool],
model=InferenceClientModel()
)
with langfuse.start_as_current_span(
    name="Smolagent-Trace",
) as span:
    # Run your application here
    response = agent.run("What is the capital of Germany?")

    # Pass additional attributes to the span
    span.update_trace(
        input="What is the capital of Germany?",
        output=response,
        user_id="smolagent-user-123",
        session_id="smolagent-session-123456789",
        tags=["city-question", "testing-agents"],
        metadata={"email": "user@langfuse.com"},
    )

# Flush events in short-lived applications
langfuse.flush()
```

#### 4. User Feedback
If your agent is embedded in a user interface, you can record direct user feedback (like a thumbs-up/down in a chat UI). Below is an example using [Gradio](https://gradio.app/) to embed a chat with a simple feedback mechanism.
In the code snippet below, when a user sends a chat message, we capture the trace in Langfuse. If the user likes or dislikes the last response, we attach a score to the trace.
```python
import gradio as gr
from smolagents import (CodeAgent, InferenceClientModel)
from langfuse import get_client
langfuse = get_client()
model = InferenceClientModel()
agent = CodeAgent(tools=[], model=model, add_base_tools=True)
trace_id = None
def respond(prompt, history):
with langfuse.start_as_current_span(
name="Smolagent-Trace"):
        # Run your application here
output = agent.run(prompt)
global trace_id
trace_id = langfuse.get_current_trace_id()
history.append({"role": "assistant", "content": str(output)})
return history
def handle_like(data: gr.LikeData):
    # For demonstration purposes, we map user feedback to 1 (like) or 0 (dislike)
if data.liked:
langfuse.create_score(
value=1,
name="user-feedback",
trace_id=trace_id
)
else:
langfuse.create_score(
value=0,
name="user-feedback",
trace_id=trace_id
)
with gr.Blocks() as demo:
chatbot = gr.Chatbot(label="Chat", type="messages")
prompt_box = gr.Textbox(placeholder="Tapez votre message...", label="Votre message")
# Lorsque l'utilisateur appuie sur "Enter", nous exécutons 'respond'
prompt_box.submit(
fn=respond,
inputs=[prompt_box, chatbot],
outputs=chatbot
)
    # When the user clicks the 'like' button on a message, we run 'handle_like'
chatbot.like(handle_like, None, None)
demo.launch()
```
User feedback is then captured in your observability tool:

#### 5. LLM-as-a-Judge
LLM-as-a-Judge is another way to automatically evaluate your agent's output. You can set up a separate LLM call to gauge the output's correctness, toxicity, style, or any other criteria you care about.
**How it works**:
1. You define an **Evaluation Template**, e.g., "Check if the text is toxic".
2. Each time your agent generates output, you pass that output to your judge LLM along with the template.
3. The judge LLM responds with a rating or label that you log in your observability tool.
Example from Langfuse:


```python
# Example: Check whether the agent's output is toxic or not.
from smolagents import (CodeAgent, DuckDuckGoSearchTool, InferenceClientModel)
search_tool = DuckDuckGoSearchTool()
agent = CodeAgent(tools=[search_tool], model=InferenceClientModel())
agent.run("Manger des carottes peut-il améliorer votre vision ?")
```
You can see that the answer in this example is judged as "not toxic".

#### 6. Observability Metrics Overview
All of these metrics can be visualized together in dashboards. This enables you to quickly see how your agent performs across many sessions and helps you track quality metrics over time.

## Offline Evaluation
Online evaluation is essential for live feedback, but you also need **offline evaluation**, i.e., systematic checks before or during development. This helps maintain quality and reliability before rolling changes into production.
### Dataset Evaluation
In offline evaluation, you typically:
1. Have a benchmark dataset (with prompt and expected output pairs)
2. Run your agent on that dataset
3. Compare outputs to the expected results, or use an additional scoring mechanism
Below, we demonstrate this approach with the [GSM8K dataset](https://huggingface.co/datasets/gsm8k), which contains math questions and solutions.
```python
import pandas as pd
from datasets import load_dataset
# Fetch GSM8K from Hugging Face
dataset = load_dataset("openai/gsm8k", 'main', split='train')
df = pd.DataFrame(dataset)
print("Premières lignes du jeu de données GSM8K :")
print(df.head())
```
Next, we create a dataset in Langfuse to track the runs. We then add each item of the dataset to the system.
(If you are not using Langfuse, you can simply store these in your own database or in a local file for analysis.)
```python
from langfuse import get_client
langfuse = get_client()
langfuse_dataset_name = "gsm8k_dataset_huggingface"
# Create a dataset in Langfuse
langfuse.create_dataset(
    name=langfuse_dataset_name,
    description="GSM8K benchmark dataset uploaded from Huggingface",
    metadata={
        "date": "2025-03-10",
        "type": "benchmark"
    }
)
```
```python
for idx, row in df.iterrows():
    langfuse.create_dataset_item(
        dataset_name=langfuse_dataset_name,
        input={"text": row["question"]},
        expected_output={"text": row["answer"]},
        metadata={"source_index": idx}
    )
    if idx >= 9:  # Upload only the first 10 items for demonstration
        break
```

#### Running the agent on the dataset
We define a helper function `run_smolagent()` that:
1. Starts a Langfuse span
2. Runs our agent on the *prompt*
3. Records the trace ID in Langfuse
Then, we loop over each dataset item, run the agent, and link the trace to the dataset item. We can also attach a quick evaluation score if desired (see the sketch after the code).
```python
from opentelemetry.trace import format_trace_id
from smolagents import (CodeAgent, InferenceClientModel, LiteLLMModel)
from langfuse import get_client

langfuse = get_client()

# Example: use InferenceClientModel or LiteLLMModel to access openai, anthropic, gemini, etc. models:
model = InferenceClientModel()

agent = CodeAgent(
    tools=[],
    model=model,
    add_base_tools=True
)

dataset_name = "gsm8k_dataset_huggingface"
current_run_name = "smolagent-notebook-run-01"  # Identifies this specific evaluation run

# Assume 'run_smolagent' is your instrumented application function
def run_smolagent(question):
    with langfuse.start_as_current_generation(name="qna-llm-call") as generation:
        # Simulate an LLM call
        result = agent.run(question)

        # Update the trace with the input and output
        generation.update_trace(
            input=question,
            output=result,
        )

        return result

dataset = langfuse.get_dataset(name=dataset_name)  # Fetch your pre-populated dataset

for item in dataset.items:
    # Use the item.run() context manager
    with item.run(
        run_name=current_run_name,
        run_metadata={"model_provider": "Hugging Face", "temperature_setting": 0.7},
        run_description="Evaluation run for the GSM8K dataset"
    ) as root_span:  # root_span is the root span of the new trace for this item and run.
        # All subsequent langfuse operations within this block are part of this trace.

        # Call your application logic
        generated_answer = run_smolagent(question=item.input["text"])

    print(item.input)
```
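If you want the quick evaluation score mentioned above, one hedged option — assuming the Langfuse v3 span API exposes `score_trace` — is to score each trace from inside the `item.run()` block:

```python
# Add inside the `with item.run(...) as root_span:` block above (hedged sketch)
expected = item.expected_output["text"]
root_span.score_trace(
    name="contains_expected_answer",  # hypothetical score name
    value=1.0 if expected in str(generated_answer) else 0.0,  # crude contains-match
)
```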
You can repeat this process with different:
- Models (OpenAI GPT, a local LLM, etc.)
- Tools (search vs. no search)
- Prompts (different system messages)
Then compare them side by side in your observability tool:


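Programmatically, one hedged way to produce those comparison runs (the run names and model IDs below are illustrative assumptions) is to repeat the evaluation loop above once per configuration, each with its own run name:

```python
from smolagents import CodeAgent, InferenceClientModel

# Hypothetical configurations to compare; each one becomes a separate dataset run
configurations = {
    "run-qwen-coder": InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"),
    "run-llama": InferenceClientModel(model_id="meta-llama/Llama-3.3-70B-Instruct"),
}

for run_name, model in configurations.items():
    agent = CodeAgent(tools=[], model=model, add_base_tools=True)
    for item in dataset.items:
        with item.run(run_name=run_name) as root_span:
            root_span.update_trace(
                input=item.input,
                output=agent.run(item.input["text"]),
            )
```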
## Final thoughts
In this *notebook*, we covered how to:
1. **Set up observability** using smolagents + OpenTelemetry exporters
2. **Check instrumentation** by running a simple agent
3. **Capture detailed metrics** (cost, latency, etc.) using observability tools
4. **Collect user feedback** through a Gradio interface
5. **Use LLM-as-a-Judge** to automatically evaluate outputs
6. **Perform offline evaluation** with a benchmark dataset
🤗 Happy coding! | agents-course/units/fr/bonus-unit2/monitoring-and-evaluating-agents-notebook.mdx/0 | {
"file_path": "agents-course/units/fr/bonus-unit2/monitoring-and-evaluating-agents-notebook.mdx",
"repo_id": "agents-course",
"token_count": 7618
} | 10 |
# Conclusion [[conclusion]]
Congratulations on finishing this first unit 🥳
You have **mastered the fundamentals** and created your first agent!
It is **perfectly normal to still feel confused by some of these elements**. Agents are a complex topic, and it often takes a while for everything to click.
**Take the time to really absorb the material** before moving on. It's important to master these elements and build a solid foundation before jumping into the fun part.
And if you pass the quiz, don't forget to claim your certificate 🎓 👉 [here](https://huggingface.co/spaces/agents-course/unit1-certification-app)
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Certificate example"/>
In the next (bonus) unit, you will learn **how to fine-tune an agent to do function calling (i.e. to be able to call tools based on the user's prompt)**.
Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, feel free to [fill out this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog).
### Keep learning, stay awesome 🤗
| agents-course/units/fr/unit1/conclusion.mdx/0 | {
"file_path": "agents-course/units/fr/unit1/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 513
} | 11 |
# Document analysis graph
Alfred at your service. As Mr. Wayne's trusted butler, I've taken the liberty of documenting how I assist Mr. Wayne with his various documentary needs. While he's out attending to his... nighttime activities, I make sure all of his papers, training schedules, and nutrition plans are properly analyzed and organized.
Before leaving, he left a note with his training program for the week. I then took it upon myself to come up with a **menu** for tomorrow's meals.
For future events like this, let's create a document analysis system using LangGraph to serve Mr. Wayne's needs. This system can:
1. Process image documents
2. Extract text using vision models (*Vision Language Model*)
3. Perform calculations when needed (to demonstrate ordinary tools)
4. Analyze content and provide concise summaries
5. Execute specific instructions related to documents
## The butler's workflow
The workflow we are going to build follows this structured outline:

<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/langgraph/agent.ipynb" target="_blank">this <i>notebook</i></a>, which you can run with Google Colab.
</Tip>
## Setting up the environment
```python
%pip install langgraph langchain_openai langchain_core
```
and the imports:
```python
import base64
from typing import List, TypedDict, Annotated, Optional
from langchain_openai import ChatOpenAI
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage
from langgraph.graph.message import add_messages
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
from IPython.display import Image, display
```
## Defining the agent's state
This state is a bit more involved than the ones we have seen previously.
`AnyMessage` is a LangChain class that defines messages, and `add_messages` is an operator that appends the latest message rather than overwriting the state with it.
This is a new concept in LangGraph: you can add operators to your state to define how its fields should be combined.
```python
class AgentState(TypedDict):
    # The document provided
    input_file: Optional[str]  # Contains the file path (PDF/PNG)
    messages: Annotated[list[AnyMessage], add_messages]
```
## Preparing the tools
```python
vision_llm = ChatOpenAI(model="gpt-4o")

def extract_text(img_path: str) -> str:
    """
    Extract text from an image file using a multimodal model.

    Master Wayne often leaves notes with his training regimen or meal plans.
    This allows me to properly analyze the content.
    """
    all_text = ""
    try:
        # Read the image and encode it as base64
        with open(img_path, "rb") as image_file:
            image_bytes = image_file.read()
        image_base64 = base64.b64encode(image_bytes).decode("utf-8")

        # Prepare the prompt, including the base64 image data
        message = [
            HumanMessage(
                content=[
                    {
                        "type": "text",
                        "text": (
                            "Extract all the text from this image. "
                            "Return only the extracted text, no explanations."
                        ),
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/png;base64,{image_base64}"
                        },
                    },
                ]
            )
        ]

        # Call the VLM
        response = vision_llm.invoke(message)

        # Append the extracted text
        all_text += response.content + "\n\n"

        return all_text.strip()
    except Exception as e:
        # A butler should handle errors gracefully
        error_msg = f"Error extracting text: {str(e)}"
        print(error_msg)
        return ""

def divide(a: int, b: int) -> float:
    """Divide a by b - for Master Wayne's occasional calculations."""
    return a / b

# Equip the butler with tools
tools = [
    divide,
    extract_text
]

llm = ChatOpenAI(model="gpt-4o")
llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)
```
## The nodes
```python
def assistant(state: AgentState):
    # System message
    textual_description_of_tool = """
extract_text(img_path: str) -> str:
    Extract text from an image file using a multimodal model.

    Args:
        img_path: A local image file path (string).

    Returns:
        A single string containing the concatenated text extracted from each image.
divide(a: int, b: int) -> float:
    Divide a by b
"""
    image = state["input_file"]
    sys_msg = SystemMessage(content=f"You are a helpful butler named Alfred who serves Mr. Wayne and Batman. You can analyze documents and perform calculations with the provided tools:\n{textual_description_of_tool} \n You have access to some optional images. Currently the loaded image is: {image}")

    return {
        "messages": [llm_with_tools.invoke([sys_msg] + state["messages"])],
        "input_file": state["input_file"]
    }
```
## The *ReAct* pattern: how I assist Mr. Wayne
Allow me to explain the approach in this agent. The agent follows what's known as the *ReAct* pattern (*Reason-Act-Observe*):
1. **Reason** about his documents and requests
2. **Act** by using the appropriate tools
3. **Observe** the results
4. **Repeat** as necessary until I've fully addressed his needs
This is a simple implementation of an agent using LangGraph.
```python
# The graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to tools
    # Otherwise, provide a direct response
    tools_condition,
)
builder.add_edge("tools", "assistant")
react_graph = builder.compile()

# Show the butler's thought process
display(Image(react_graph.get_graph(xray=True).draw_mermaid_png()))
```
We define a `tools` node with our list of tools. The `assistant` node is just our model with the tools bound.
We create a graph with the `assistant` and `tools` nodes.
We add a `tools_condition` edge, which routes to `End` or to `tools` depending on whether the `assistant` calls a tool.
Now, we add one new step:
We connect the `tools` node back to the `assistant`, forming a loop.
- After the `assistant` node executes, `tools_condition` checks whether the model's output is a tool call.
- If it is a tool call, the flow is directed to the `tools` node.
- The `tools` node connects back to `assistant`.
- This loop continues as long as the model decides to call tools.
- If the model's response is not a tool call, the flow is directed to *END*, terminating the process.

## The butler in action
### Example 1: Simple calculations
Here is an example showing a simple use case of an agent using a tool in LangGraph.
```python
messages = [HumanMessage(content="Divide 6790 by 5")]
messages = react_graph.invoke({"messages": messages, "input_file": None})

# Show the messages
for m in messages['messages']:
m.pretty_print()
```
The conversation would proceed as follows:
```
Human: Divide 6790 by 5

Tool call: divide(a=6790, b=5)
Tool response: 1358.0

Alfred: The result of dividing 6790 by 5 is 1358.0.
```
### Example 2: Analyzing Master Wayne's training documents
When Master Wayne leaves his training and meal notes:
```python
messages = [HumanMessage(content="According to the note provided by Mr. Wayne in the provided images, what's the list of items I should buy for the dinner menu?")]
messages = react_graph.invoke({"messages": messages, "input_file": "Batman_training_and_meals.png"})
```
The interaction would look like:
```
Human: According to the note provided by Mr. Wayne in the provided images, what's the list of items I should buy for the dinner menu?

Tool call: extract_text(img_path="Batman_training_and_meals.png")
Tool response: [Extracted text with the training schedule and menu details]

Alfred: For the dinner menu, you should buy the following items:

1. Grass-fed local sirloin steak
2. Organic spinach
3. *Piquillo* peppers
4. Potatoes (for oven-baked golden herb potatoes)
5. Fish oil (2 grams)

Make sure the steak is grass-fed and the spinach and peppers are organic for the best quality meal.
```
## Key takeaways
Should you wish to create your own document analysis butler, here are the key considerations:
1. **Define clear tools** for specific document-related tasks
2. **Create a robust state tracker** to maintain context across tool calls
3. **Consider error handling** for tool failures
4. **Maintain contextual awareness** of previous interactions (ensured by the `add_messages` operator)
With these principles, you too can deliver exemplary document analysis service worthy of Wayne Manor.
*I trust this explanation has been satisfactory. Now, if you'll excuse me, Master Wayne's cape requires pressing before tonight's activities.* | agents-course/units/fr/unit2/langgraph/document_analysis_agent.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/langgraph/document_analysis_agent.mdx",
"repo_id": "agents-course",
"token_count": 4065
} | 12 |
# Conclusion
Congratulations on finishing the `smolagents` module of this second unit 🥳
You have **mastered the fundamentals** of `smolagents` and built your own agent! Now that you have these skills, you can start building agents that solve the tasks you care about.
In the next module, you will learn **how to build agents with LlamaIndex**.
Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, feel free to [fill out this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog).
### Keep learning, stay awesome 🤗
| agents-course/units/fr/unit2/smolagents/conclusion.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/smolagents/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 305
} | 13 |
# Building a RAG to converse with the guests
Alfred, your trusted agent, is preparing for the most extravagant gala of the century. To make sure the event runs smoothly, he needs quick access to up-to-date information about each guest. Let's help him by building a RAG tool powered by our custom dataset.
## Why RAG for a gala?
Imagine Alfred mingling among the guests, needing to recall specific details about each person at a moment's notice. A traditional LLM might struggle with this task because:
1. The guest list is specific to your event and is not part of the model's training data
2. Guest information may change or be updated frequently
3. Alfred needs to retrieve precise details such as email addresses
This is where RAG shines! By combining a retrieval system with an LLM, Alfred can access accurate, up-to-date information about your guests on demand.
<Tip>
You can pick any of the <i>frameworks</i> covered in the course for this use case. Select your preferred option in the code tabs.
</Tip>
## Setting up our application
In this unit, we'll develop our agent inside a *Space*, structured as a Python project. This approach helps us keep the code clean and modular by organizing different features into separate files. It also makes for a more realistic use case in which you would deploy the application for public use.
### Project structure
- **`tools.py`** – Provides auxiliary tools for the agent.
- **`retriever.py`** – Implements the retrieval functions that support knowledge access.
- **`app.py`** – Integrates all the components into a fully functional agent, which we will finalize in the last part of this unit.
For a hands-on reference, check out [this *Space*](https://huggingface.co/spaces/agents-course/Unit_3_Agentic_RAG) featuring an agentic RAG. Feel free to clone it and experiment!
You can test the agent directly below:
<iframe
src="https://agents-course-unit-3-agentic-rag.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
## Dataset overview
Our dataset [`agents-course/unit3-invitees`](https://huggingface.co/datasets/agents-course/unit3-invitees/) contains the following fields for each guest:
- **Name**: The guest's full name
- **Relation**: How the guest is related to the host
- **Description**: A brief biography or interesting facts about the guest
- **Email Address**: Contact information for sending invitations or follow-ups
Here is a preview of the dataset:
<iframe
src="https://huggingface.co/datasets/agents-course/unit3-invitees/embed/viewer/default/train"
frameborder="0"
width="100%"
height="560px"
></iframe>
<Tip>
In a real-world scenario, this dataset could be extended to include dietary preferences, gift interests, conversation topics to avoid, and other details useful to a host.
</Tip>
## Building a guestbook tool
We'll create a custom tool Alfred can use to quickly look up guest information during the gala. Let's break this down into three manageable steps:
1. Load and prepare the dataset
2. Create the retrieval tool
3. Integrate the tool with Alfred
Let's start by loading and preparing the dataset!
### Step 1: Load and prepare the dataset
First, we need to transform our raw guest data into a format optimized for retrieval.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
We will use the Hugging Face `datasets` library to load the dataset and convert it into a list of `Document` objects from the `langchain_core.documents` module (matching the import below).
```python
import datasets
from langchain_core.documents import Document
# Load the dataset
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
docs = [
Document(
page_content="\n".join([
f"Name: {guest['name']}",
f"Relation: {guest['relation']}",
f"Description: {guest['description']}",
f"Email: {guest['email']}"
]),
metadata={"name": guest["name"]}
)
for guest in guest_dataset
]
```
</hfoption>
<hfoption id="llama-index">
We will use the Hugging Face `datasets` library to load the dataset and convert it into a list of `Document` objects from the `llama_index.core.schema` module.
```python
import datasets
from llama_index.core.schema import Document
# Load the dataset
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
docs = [
Document(
text="\n".join([
f"Name: {guest_dataset['name'][i]}",
f"Relation: {guest_dataset['relation'][i]}",
f"Description: {guest_dataset['description'][i]}",
f"Email: {guest_dataset['email'][i]}"
]),
metadata={"name": guest_dataset['name'][i]}
)
for i in range(len(guest_dataset))
]
```
</hfoption>
<hfoption id="langgraph">
We will use the Hugging Face `datasets` library to load the dataset and convert it into a list of `Document` objects from the `langchain_core.documents` module (matching the import below).
```python
import datasets
from langchain_core.documents import Document
# Load the dataset
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
docs = [
Document(
page_content="\n".join([
f"Name: {guest['name']}",
f"Relation: {guest['relation']}",
f"Description: {guest['description']}",
f"Email: {guest['email']}"
]),
metadata={"name": guest["name"]}
)
for guest in guest_dataset
]
```
</hfoption>
</hfoptions>
In the code above, we:
- Load the dataset
- Convert each guest entry into a `Document` object with formatted content
- Store the `Document` objects in a list
This means all of our data is readily available, so we can start configuring our retrieval.
### Step 2: Create the retrieval tool
Now, let's create a custom tool Alfred can use to search through our guest information.
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
We will use the `BM25Retriever` from the `langchain_community.retrievers` module to create a retrieval tool.
<Tip>
The <code>BM25Retriever</code> is a great starting point for retrieval, but for more advanced semantic search you might consider embedding-based retrievers such as those from <a href="https://www.sbert.net/">sentence-transformers</a>.
</Tip>
```python
from smolagents import Tool
from langchain_community.retrievers import BM25Retriever
class GuestInfoRetrieverTool(Tool):
    name = "guest_info_retriever"
    description = "Retrieves detailed information about gala guests based on their name or relation."
    inputs = {
        "query": {
            "type": "string",
            "description": "The name or relation of the guest you want information about."
        }
    }
    output_type = "string"

    def __init__(self, docs):
        self.is_initialized = False
        self.retriever = BM25Retriever.from_documents(docs)

    def forward(self, query: str):
        results = self.retriever.get_relevant_documents(query)
        if results:
            return "\n\n".join([doc.page_content for doc in results[:3]])
        else:
            return "No matching guest information found."

# Initialize the tool
guest_info_tool = GuestInfoRetrieverTool(docs)
```
Let's understand this tool step by step:
- The `name` and `description` help the agent understand when and how to use this tool
- The `inputs` define what parameters the tool expects (in this case, a search query)
- We use a `BM25Retriever`, a powerful text-retrieval algorithm that doesn't require embeddings
- The `forward` method processes the query and returns the most relevant guest information
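As a quick sanity check — a hedged one-liner, assuming `smolagents` `Tool` instances are directly callable — you can query the tool before handing it to the agent:

```python
print(guest_info_tool(query="Lady Ada Lovelace"))  # should print her dataset entry
```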
</hfoption>
<hfoption id="llama-index">
We will use the `BM25Retriever` from the `llama_index.retrievers.bm25` module to create a retrieval tool.
<Tip>
The <code>BM25Retriever</code> is a great starting point for retrieval, but for more advanced semantic search you might consider embedding-based retrievers such as those from <a href="https://www.sbert.net/">sentence-transformers</a>.
</Tip>
```python
from llama_index.core.tools import FunctionTool
from llama_index.retrievers.bm25 import BM25Retriever
bm25_retriever = BM25Retriever.from_defaults(nodes=docs)
def get_guest_info_retriever(query: str) -> str:
    """Retrieves detailed information about gala guests based on their name or relation."""
    results = bm25_retriever.retrieve(query)
    if results:
        return "\n\n".join([doc.text for doc in results[:3]])
    else:
        return "No matching guest information found."

# Initialize the tool
guest_info_tool = FunctionTool.from_defaults(get_guest_info_retriever)
```
Let's understand this tool step by step:
- The *docstring* helps the agent understand when and how to use this tool
- The type annotations define what parameters the tool expects (in this case, a search query)
- We use a `BM25Retriever`, a powerful text-retrieval algorithm that doesn't require embeddings
- The method processes the query and returns the most relevant guest information
</hfoption>
<hfoption id="langgraph">
We will use the `BM25Retriever` from the `langchain_community.retrievers` module to create a retrieval tool.
<Tip>
The <code>BM25Retriever</code> is a great starting point for retrieval, but for more advanced semantic search you might consider embedding-based retrievers such as those from <a href="https://www.sbert.net/">sentence-transformers</a>.
</Tip>
```python
from langchain_community.retrievers import BM25Retriever
from langchain.tools import Tool
bm25_retriever = BM25Retriever.from_documents(docs)
def extract_text(query: str) -> str:
    """Retrieves detailed information about gala guests based on their name or relation."""
    results = bm25_retriever.invoke(query)
    if results:
        return "\n\n".join([doc.page_content for doc in results[:3]])
    else:
        return "No matching guest information found."

guest_info_tool = Tool(
    name="guest_info_retriever",
    func=extract_text,
    description="Retrieves detailed information about gala guests based on their name or relation."
)
```
Let's understand this tool step by step:
- The `name` and `description` help the agent understand when and how to use this tool
- The type annotations define what parameters the tool expects (in this case, a search query)
- We use a `BM25Retriever`, a powerful text-retrieval algorithm that doesn't require embeddings
- The method processes the query and returns the most relevant guest information
</hfoption>
</hfoptions>
### Step 3: Integrate the tool with Alfred
Finally, let's bring everything together by creating our agent and equipping it with our custom tool:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
from smolagents import CodeAgent, InferenceClientModel
# Initialize the Hugging Face model
model = InferenceClientModel()

# Create Alfred, our gala agent, with the guest info tool
alfred = CodeAgent(tools=[guest_info_tool], model=model)

# Example query Alfred might receive during the gala
response = alfred.run("Tell me about our guest named 'Lady Ada Lovelace'.")

print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
Based on the information I retrieved, Lady Ada Lovelace is an esteemed mathematician and friend. She is renowned for her pioneering work in mathematics and computing, often celebrated as the first computer programmer due to her work on Charles Babbage's Analytical Engine. Her email address is ada.lovelace@example.com.
```
What's happening in this final step:
- We initialize a Hugging Face model using the `InferenceClientModel` class
- We create our agent (Alfred) as a `CodeAgent`, which can execute Python code to solve problems
- We ask Alfred to retrieve information about a guest named "Lady Ada Lovelace"
</hfoption>
<hfoption id="llama-index">
```python
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
# Initialize the Hugging Face model
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# Create Alfred, our gala agent, with the guest info tool
alfred = AgentWorkflow.from_tools_or_functions(
    [guest_info_tool],
    llm=llm,
)

# Example query Alfred might receive during the gala
response = await alfred.run("Tell me about our guest named 'Lady Ada Lovelace'.")

print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
Lady Ada Lovelace is an esteemed mathematician and friend, renowned for her pioneering work in mathematics and computing. She is celebrated as the first computer programmer due to her work on Charles Babbage's Analytical Engine. Her email is ada.lovelace@example.com.
```
What's happening in this final step:
- We initialize a Hugging Face model using the `HuggingFaceInferenceAPI` class
- We create our agent (Alfred) as an `AgentWorkflow`, including the tool we just created
- We ask Alfred to retrieve information about a guest named "Lady Ada Lovelace"
</hfoption>
<hfoption id="langgraph">
```python
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Generate the chat interface, including the tools
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
)

chat = ChatHuggingFace(llm=llm, verbose=True)
tools = [guest_info_tool]
chat_with_tools = chat.bind_tools(tools)

# Generate the AgentState and agent graph
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]

def assistant(state: AgentState):
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }

## The graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to tools
    # Otherwise, provide a direct response
    tools_condition,
)
builder.add_edge("tools", "assistant")
alfred = builder.compile()

messages = [HumanMessage(content="Tell me about our guest named 'Lady Ada Lovelace'.")]
response = alfred.invoke({"messages": messages})

print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
Expected output:
```
🎩 Alfred's Response:
Lady Ada Lovelace is an esteemed mathematician and computing pioneer, often celebrated as the first computer programmer due to her work on Charles Babbage's Analytical Engine.
```
What's happening in this final step:
- We initialize a Hugging Face model using the `HuggingFaceEndpoint` class. We also generate a chat interface and bind the tools.
- We create our agent (Alfred) as a `StateGraph`, which combines 2 nodes (`assistant`, `tools`) using an edge
- We ask Alfred to retrieve information about a guest named "Lady Ada Lovelace"
</hfoption>
</hfoptions>
## Example interaction
During the gala, a conversation might flow like this:
**You:** "Alfred, who is that gentleman talking to the ambassador?"
**Alfred:** *quickly searches the guest database* "That's Dr. Nikola Tesla, sir. He's an old friend from your university days. He's recently patented a new wireless energy transmission system and would be delighted to discuss it with you. Just remember he's passionate about pigeons, so that might make for good small talk."
```json
{
    "name": "Dr. Nikola Tesla",
    "relation": "old friend from university days",
    "description": "Dr. Nikola Tesla is an old friend from your university days. He's recently patented a new wireless energy transmission system and would be delighted to discuss it with you. Just remember he's passionate about pigeons, so that might make for good small talk.",
    "email": "nikola.tesla@gmail.com"
}
```
## Going further
Now that Alfred can retrieve guest information, consider how you could enhance this system:
1. **Improve the retriever** to use a more sophisticated algorithm such as those available in [`sentence-transformers`](https://www.sbert.net/)
2. **Implement conversation memory** so Alfred remembers previous interactions (see the sketch below)
3. **Combine with web search** to get the latest information about unfamiliar guests
4. **Integrate multiple indexes** to get more complete information from verified sources
Now Alfred is fully equipped to handle guest questions effortlessly, ensuring your gala is remembered as the most sophisticated and delightful event of the century!
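For idea 2 above, here is a hedged sketch assuming the smolagents version of Alfred from Step 3, whose `run` method accepts a `reset` flag that keeps the agent's memory between calls:

```python
# First question starts a fresh conversation
alfred.run("Tell me about our guest named 'Lady Ada Lovelace'.")

# The follow-up reuses the agent's memory instead of starting from scratch
alfred.run("What is her email address?", reset=False)
```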
<Tip>
Try extending the retrieval tool to also return conversation starters based on each guest's interests or background. How would you modify the tool to accomplish this?
When you're done, implement your guest retrieval tool in the <code>retriever.py</code> file of the <i>Space</i>.
</Tip>
| agents-course/units/fr/unit3/agentic-rag/invitees.mdx/0 | {
"file_path": "agents-course/units/fr/unit3/agentic-rag/invitees.mdx",
"repo_id": "agents-course",
"token_count": 7170
} | 14 |
# The dummy agent library [[dummy-agent-library]]
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-unit1sub3DONE.jpg" alt="Unit 1 planning"/>
This course is framework-agnostic by design, because we want to **focus on the concepts of AI agents and avoid getting bogged down in the specifics of a particular framework**.
We also want students to be able to use the concepts they learn in this course in their own projects, with whichever framework they prefer.
Therefore, in Unit 1 we will use a simple dummy agent library and a serverless API to access the LLM engine.
You probably wouldn't use these in a real production environment, but they are a **great starting point for understanding how agents work**.
After this section, you'll be ready to **create a simple agent** using `smolagents`.
In the following units, we will also use other AI agent libraries such as `LangGraph` and `LlamaIndex`.
To keep things simple, we will use plain Python functions as our tools and agent.
We will use built-in Python packages like `datetime` and `os` so that you can try it out in any environment.
You can follow the process and **run the code yourself** in [this notebook](https://huggingface.co/agents-course/notebooks/blob/main/dummy_agent_library.ipynb).
## Serverless API [[serverless-api]]
In the Hugging Face ecosystem, there is a convenient feature called the Serverless API that lets you easily run inference with many models. There is no installation or deployment required.
```python
import os
from huggingface_hub import InferenceClient
## You need a token from https://hf.co/settings/tokens. Make sure you select 'read' as the token type. If you run this on Google Colab, you can set it up in the "settings" tab under "secrets". Make sure to name it "HF_TOKEN".
os.environ["HF_TOKEN"]="hf_xxxxxxxxxxxxxx"
client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
# If the output of the next cells is wrong, the free model may be overloaded. You can instead use this public endpoint that contains Llama-3.2-3B-Instruct
# client = InferenceClient("https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud")
```
```python
output = client.text_generation(
"The capital of France is",
max_new_tokens=100,
)
print(output)
```
Output:
```
Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris.
```
As seen in the LLM section, if we just do decoding, **the model will only stop when it predicts an EOS (End of Sequence) token**, which doesn't happen here. That's because this is a conversational (chat) model and **we didn't apply the chat template the model expects**.
If we now add the special tokens associated with the <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct">Llama-3.2-3B-Instruct model</a> we're using, the behavior changes and the model produces the expected EOS.
```python
prompt="""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
The capital of France is<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
output = client.text_generation(
prompt,
max_new_tokens=100,
)
print(output)
```
Output:
```
The capital of France is Paris.
```
"chat" 메서드를 사용하는 것이 채팅 템플릿을 적용하는 훨씬 더 편리하고 안정적인 방법입니다:
```python
output = client.chat.completions.create(
messages=[
{"role": "user", "content": "The capital of France is"},
],
stream=False,
max_tokens=1024,
)
print(output.choices[0].message.content)
```
Output:
```
Paris.
```
The chat method is the recommended method to use in order to ensure a smooth transition between models, but since this notebook is educational, we will keep using the "text_generation" method to understand the details.
## Dummy agent [[dummy-agent]]
As seen in the previous section, the core of an agent library is appending information to the system prompt.
This system prompt is a bit more complex than the one we saw earlier, but it already contains:
1. **Information about the tools**
2. **Cycle instructions** (Thought → Action → Observation)
```
Answer the following questions as best you can. You have access to the following tools:
get_weather: Get the current weather in a given location
The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are:
get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}}
example use :
{{
"action": "get_weather",
"action_input": {"location": "New York"}
}}
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about one action to take. Only one action at a time in this format:
Action:
$JSON_BLOB (inside markdown cell)
Observation: the result of the action. This Observation is unique, complete, and the source of truth.
... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.)
You must always end your output with the following format:
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer.
```
"text_generation" 메서드를 사용하고 있으므로 프롬프트를 수동으로 적용해야 합니다:
```python
prompt=f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
{SYSTEM_PROMPT}
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's the weather in London ?
<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
```
We can also do it like this, which is what happens inside the `chat` method:
```python
messages=[
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": "What's the weather in London ?"},
]
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
tokenizer.apply_chat_template(messages, tokenize=False,add_generation_prompt=True)
```
The prompt is now:
```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Answer the following questions as best you can. You have access to the following tools:
get_weather: Get the current weather in a given location
The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are:
get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}}
example use :
{{
"action": "get_weather",
"action_input": {"location": "New York"}
}}
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about one action to take. Only one action at a time in this format:
Action:
$JSON_BLOB (inside markdown cell)
Observation: the result of the action. This Observation is unique, complete, and the source of truth.
... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.)
You must always end your output with the following format:
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer.
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's the weather in London ?
<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```
Let's decode!
```python
output = client.text_generation(
prompt,
max_new_tokens=200,
)
print(output)
```
Output:
````
Thought: I will check the weather in London.
Action:
```
{
"action": "get_weather",
"action_input": {"location": "London"}
}
```
Observation: The current weather in London is mostly cloudy with a high of 12°C and a low of 8°C.
````
Do you see the problem?
> The model hallucinated an answer without any real data. We need to stop generating here so we can actually execute the function!
Let's now stop generating before the "Observation:" part so we don't hallucinate the tool response.
```python
output = client.text_generation(
prompt,
max_new_tokens=200,
stop=["Observation:"] # 실제 함수가 호출되기 전에 중단합니다
)
print(output)
```
Output:
````
Thought: I will check the weather in London.
Action:
```
{
"action": "get_weather",
"action_input": {"location": "London"}
}
```
Observation:
````
Much better!
Let's now create a simple function that provides weather information. In a real situation, you would call an API.
```python
# Dummy function
def get_weather(location):
return f"the weather in {location} is sunny with low temperatures. \n"
get_weather('London')
```
Output:
```
'the weather in London is sunny with low temperatures. \n'
```
Let's now concatenate the base prompt, the completion up to the function execution, and the result of the function as an Observation, then resume generation.
```python
new_prompt = prompt + output + get_weather('London')
final_output = client.text_generation(
new_prompt,
max_new_tokens=200,
)
print(final_output)
```
The new prompt is:
````
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Answer the following questions as best you can. You have access to the following tools:
get_weather: Get the current weather in a given location
The way you use the tools is by specifying a json blob.
Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here).
The only values that should be in the "action" field are:
get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}}
example use :
{{
"action": "get_weather",
"action_input": {"location": "New York"}
}}
ALWAYS use the following format:
Question: the input question you must answer
Thought: you should always think about one action to take. Only one action at a time in this format:
Action:
$JSON_BLOB (inside markdown cell)
Observation: the result of the action. This Observation is unique, complete, and the source of truth.
... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.)
You must always end your output with the following format:
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer.
<|eot_id|><|start_header_id|>user<|end_header_id|>
What's the weather in London ?
<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Thought: I will check the weather in London.
Action:
```
{
"action": "get_weather",
"action_input": {"location": {"type": "string", "value": "London"}
}
```
Observation:the weather in London is sunny with low temperatures.
````
Output:
```
Final Answer: The weather in London is sunny with low temperatures.
```
---
So far, we've learned how to create agents from scratch using Python code, and we **saw just how tedious that process can be**. Fortunately, many agent libraries simplify this work by handling the complex parts for you — a hedged preview of what that looks like is sketched below.
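Here is roughly what the same weather question looks like with `smolagents` — a sketch assuming the library's `@tool` decorator and `CodeAgent` API behave as documented:

```python
from smolagents import CodeAgent, InferenceClientModel, tool

@tool
def get_weather(location: str) -> str:
    """Get the current weather in a given location.

    Args:
        location: The city to get the weather for.
    """
    return f"the weather in {location} is sunny with low temperatures."

# The library builds the system prompt, the Thought/Action/Observation cycle,
# and the stop-and-execute logic that we wired up by hand above.
agent = CodeAgent(tools=[get_weather], model=InferenceClientModel())
agent.run("What's the weather in London?")
```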
Now we are ready to **create our first real agent** using the `smolagents` library. | agents-course/units/ko/unit1/dummy-agent-library.mdx/0 | {
"file_path": "agents-course/units/ko/unit1/dummy-agent-library.mdx",
"repo_id": "agents-course",
"token_count": 6652
} | 15 |
# What is function calling?
Function calling is **a way for an LLM to take actions on its environment**. It was [first introduced in GPT-4](https://openai.com/index/function-calling-and-other-api-updates/) and was then reproduced in other models.
Just like an agent's tools, function calling gives the model the capacity to **take an action on its environment**. However, the function-calling capacity **is learned by the model during training**, and it **relies less on prompting than other agent techniques**.
In Unit 1, the agent **didn't learn to use the tools**; we just provided a list, and we relied on the model **being able to generalize and define a plan using those tools**.
Here, by contrast, **the agent is fine-tuned (trained) to use tools via function calling**.
## How does the model "learn" to take an action?
In Unit 1, we explored an agent's general workflow. Once the user has given the agent some tools and prompted it with a query, the model cycles through:
1. *Think*: What action(s) do I need to take in order to fulfill the objective?
2. *Act*: Format the action with the right parameters and stop generation.
3. *Observe*: Get the result of the execution.
In a "typical" conversation with a model through an API, the conversation alternates between user and assistant messages like this:
```python
conversation = [
{"role": "user", "content": "Мне нужна помощь с моим заказом"},
{"role": "assistant", "content": "Я буду рад помочь. Не могли бы вы сообщить номер вашего заказа?"},
{"role": "user", "content": "Это ЗАКАЗ-123"},
]
```
Function calling brings **new roles to the conversation**!
1. One new role for the **Action**
2. One new role for the **Observation**
If we take the [Mistral API](https://docs.mistral.ai/capabilities/function_calling/) as an example, it looks like this:
```python
conversation = [
{
"role": "user",
"content": "Каков статус моей транзакции T1001?"
},
{
"role": "assistant",
"content": "",
"function_call": {
"name": "retrieve_payment_status",
"arguments": "{\"transaction_id\": \"T1001\"}"
}
},
{
"role": "tool",
"name": "retrieve_payment_status",
"content": "{\"status\": \"Paid\"}"
},
{
"role": "assistant",
"content": "Ваша транзакция T1001 была успешно оплачена."
}
]
```
> ...But you said there's a new role for function calls?
**Yes and no.** In this case, as with many other APIs, the model formats the action to take as an "assistant" message. The chat template then renders this with **special tokens** for function calling:
- `[AVAILABLE_TOOLS]` – Start the list of available tools
- `[/AVAILABLE_TOOLS]` – End the list of available tools
- `[TOOL_CALLS]` – Make a call to a tool (i.e., take an "Action")
- `[TOOL_RESULTS]` – "Observe" the result of the action
- `[/TOOL_RESULTS]` – End of the observation (i.e., the model can decode again)
We'll talk about function calling again in this course, but if you want to dive deeper, you can check out [this excellent documentation section](https://docs.mistral.ai/capabilities/function_calling/).
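On the API side, the tools that end up between the `[AVAILABLE_TOOLS]` tokens are usually declared as a JSON schema. A hedged sketch of what `retrieve_payment_status` from the example above could look like in that standard format (the description strings are illustrative):

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "retrieve_payment_status",
            "description": "Get the payment status of a transaction",
            "parameters": {
                "type": "object",
                "properties": {
                    "transaction_id": {
                        "type": "string",
                        "description": "The transaction ID.",
                    }
                },
                "required": ["transaction_id"],
            },
        },
    }
]
```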
---
Now that we've learned what function calling is and how it works, let's **add function-calling capabilities to a model that doesn't have them yet**, **"google/gemma-2-2b-it"**, by adding some new special tokens to the model.
To do that, **we first need to understand fine-tuning and LoRA**. | agents-course/units/ru-RU/bonus-unit1/what-is-function-calling.mdx/0 | {
"file_path": "agents-course/units/ru-RU/bonus-unit1/what-is-function-calling.mdx",
"repo_id": "agents-course",
"token_count": 3361
} | 16 |
# Small quiz (ungraded) [[quiz1]]
So far you've understood the big picture of agents — what they are and how they work. Now it's time for a short quiz, since **testing yourself** is the best way to learn and [avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf). It will help you find **where you need to reinforce your knowledge**.
This is an optional quiz, and it is not graded.
### Q1: What is an Agent?
Which of the following best describes an AI Agent?
<Question
choices={[
  {
    text: "A system that only processes static text and never interacts with its environment.",
    explain: "An Agent must be able to take actions and interact with its environment.",
  },
  {
    text: "An AI model that can reason, plan, and use tools to interact with its environment to achieve a specific goal.",
    explain: "This definition captures the essential characteristics of an Agent.",
    correct: true
  },
  {
    text: "A chatbot that answers questions without any ability to perform actions.",
    explain: "A chatbot like this lacks the ability to take actions, which makes it different from an Agent.",
  },
  {
    text: "A digital encyclopedia that provides information but cannot perform tasks.",
    explain: "An Agent actively interacts with its environment rather than just providing static information.",
  }
]}
/>
---
### Q2: What is the role of planning in an Agent?
Why does an Agent need to plan before taking an action?
<Question
choices={[
  {
    text: "To memorize previous interactions.",
    explain: "Planning is about determining future actions, not remembering past interactions.",
  },
  {
    text: "To decide on the sequence of actions and select the appropriate tools needed to fulfill the user's request.",
    explain: "Planning helps the Agent determine the best steps and tools to complete a task.",
    correct: true
  },
  {
    text: "To generate random actions without any purpose.",
    explain: "Planning ensures the Agent's actions are intentional, not random.",
  },
  {
    text: "To translate text without any additional reasoning.",
    explain: "Planning is about structuring actions, not just converting text.",
  }
]}
/>
---
### Q3: How do tools enhance an Agent's capabilities?
Why are tools essential for an Agent?
<Question
choices={[
  {
    text: "Tools are redundant components that do not affect the Agent's performance.",
    explain: "Tools extend an Agent's capabilities by allowing it to perform actions beyond text generation.",
  },
  {
    text: "Tools give the Agent the ability to execute actions a text-generation model cannot perform natively, such as making coffee or generating images.",
    explain: "Tools enable Agents to interact with the real world and complete tasks.",
    correct: true
  },
  {
    text: "Tools are used exclusively for storing memory.",
    explain: "Tools are primarily for performing actions, not just for storing data.",
  },
  {
    text: "Tools limit the Agent to text-based responses only.",
    explain: "On the contrary, tools allow Agents to go beyond text-based responses.",
  }
]}
/>
---
### Q4: How do Actions differ from Tools?
What is the key difference between Actions and Tools?
<Question
choices={[
  {
    text: "Actions are the steps the Agent takes, while Tools are external resources the Agent can use to perform those actions.",
    explain: "Actions are higher-level objectives, while Tools are specific functions the Agent can invoke.",
    correct: true
  },
  {
    text: "Actions and Tools are the same thing and can be used interchangeably.",
    explain: "No, Actions are goals or tasks, while Tools are specific utilities the Agent uses to achieve them.",
  },
  {
    text: "Tools are general, while Actions are only for physical interactions.",
    explain: "Not necessarily. Actions can involve both digital and physical tasks.",
  },
  {
    text: "Actions require LLMs, while Tools do not.",
    explain: "While LLMs help decide on Actions, the Actions themselves are not dependent on LLMs.",
  }
]}
/>
---
### Q5: What role do Large Language Models (LLMs) play in Agents?
How do LLMs contribute to an Agent's functionality?
<Question
choices={[
  {
    text: "LLMs are used as static databases that store raw, unprocessed information.",
    explain: "LLMs don't just store information — they actively process text input and generate responses.",
  },
  {
    text: "LLMs serve as the Agent's 'brain,' processing text input to understand instructions and plan actions.",
    explain: "LLMs enable the Agent to interpret, plan, and decide on next steps.",
    correct: true
  },
  {
    text: "LLMs are only used for image processing, not text.",
    explain: "LLMs primarily work with text, although they can sometimes interact with multimodal inputs.",
  },
  {
    text: "LLMs are not used at all.",
    explain: "LLMs are a core component of modern AI Agents.",
  }
]}
/>
---
### Q6: Which of the following best demonstrates an AI Agent?
Which real-world example best illustrates an AI Agent at work?
<Question
choices={[
  {
    text: "A static FAQ page on a website.",
    explain: "A static FAQ page doesn't interact with users dynamically or take any actions.",
  },
  {
    text: "A virtual assistant like Siri or Alexa that can understand spoken commands, reason through them, and perform tasks such as setting reminders or sending messages.",
    explain: "This example includes reasoning, planning, and interaction with the environment.",
    correct: true
  },
  {
    text: "A basic calculator that performs arithmetic operations.",
    explain: "A calculator follows fixed rules without reasoning or planning, so it is not an Agent.",
  },
  {
    text: "A video game NPC that follows scripted responses.",
    explain: "Unless the NPC can reason, plan, and use tools, it cannot function as an AI Agent.",
  }
]}
/>
---
Congratulations on finishing the quiz 🥳! If you missed any items, take some time to re-read the chapter to reinforce your knowledge. If you passed, you're ready to dive deeper into the "Agent's brain": LLMs.
| agents-course/units/ru-RU/unit1/quiz1.mdx/0 | {
"file_path": "agents-course/units/ru-RU/unit1/quiz1.mdx",
"repo_id": "agents-course",
"token_count": 6529
} | 17 |
# Onboarding: Your first steps ⛵
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Đến lúc làm quen" width="100%"/>
Now that you have all the details, let's get started! We'll do four things:
1. **Create your Hugging Face account** if you haven't already
2. **Join Discord and introduce yourself** (don't be shy 🤗)
3. **Follow the Hugging Face Agents course** on the Hub
4. **Spread the word** about the course
### Step 1: Create a Hugging Face account
(If you haven't already) create a Hugging Face account <a href='https://huggingface.co/join' target='_blank'>here</a>.
### Step 2: Join our Discord community
👉🏻 Join our Discord server <a href="https://discord.gg/UrrTSsSyjb" target="_blank">here.</a>
When you join, remember to introduce yourself in the `#introduce-yourself` channel.
We have multiple channels related to AI Agents:
- `agents-course-announcements`: for the **latest course information**.
- `🎓-agents-course-general`: for **general discussions and chitchat**.
- `agents-course-questions`: to **ask questions and help your classmates**.
- `agents-course-showcase`: to **show off your best agents**.
You can also check out:
- `smolagents`: for **discussion and support with the library**.
If this is your first time using Discord, we wrote a Discord 101 guide. Check out [the next section](discord101).
### Bước 3: Theo dõi group khóa học Hugging Face Agent
Cập nhật tài liệu, thông tin mới nhất và thông báo **bằng cách theo dõi group khóa học Hugging Face Agent**.
👉 Truy cập <a href="https://huggingface.co/agents-course" target="_blank">đây</a> và bấm **follow**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/hf_course_follow.gif" alt="Theo dõi" width="100%"/>
### Bước 4: Lan tỏa thông tin về khóa học
Hãy giúp khóa học tiếp cận nhiều người hơn! Bạn có thể hỗ trợ bằng 2 cách:
1. Thể hiện sự ủng hộ bằng cách thêm ⭐ <a href="https://github.com/huggingface/agents-course" target="_blank">trang github của khóa học</a>.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/please_star.gif" alt="Repo star"/>
2. Hãy chia sẻ hành trình học tập của bạn: **Thông báo rằng bạn đang tham gia khóa học này**! Chúng mình đã chuẩn bị hình minh họa để bạn dùng trong mạng xã hội
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png">
Tải hình ảnh bằng cách bấm 👉 [tại đây](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png?download=true)
Chúc mừng! 🎉 **Bạn đã hoàn thành quá trình làm quen**! Giờ bạn đã sẵn sàng khám phá về AI Agents. Học vui nhé!
Học mãi, giữ vững tinh thần nhé 🤗 | agents-course/units/vi/unit0/onboarding.mdx/0 | {
"file_path": "agents-course/units/vi/unit0/onboarding.mdx",
"repo_id": "agents-course",
"token_count": 1752
} | 18 |
# Onboarding: Setting Sail on Your Learning Journey ⛵
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Time to onboard" width="100%"/>
Everything is ready, so let's set off! Please complete the following four steps:
1. **Create a Hugging Face account** (if you haven't already)
2. **Join the Discord community and introduce yourself** (no need to be shy 🤗)
3. **Follow the Agents course** on the Hub
4. **Help spread the word** about the course
### Step 1: Create a Hugging Face Account
(If you haven't registered yet) create an account <a href='https://huggingface.co/join' target='_blank'>here</a>
### Step 2: Join the Discord Learning Community
👉🏻 Join the server via <a href="https://discord.gg/UrrTSsSyjb" target="_blank">this link</a>
After joining, introduce yourself in the `#introduce-yourself` channel
We have several channels dedicated to AI agents:
- `agents-course-announcements`: for the **latest course updates**
- `🎓-agents-course-general`: for **general discussion and casual chat**
- `agents-course-questions`: for **questions and mutual help**
- `agents-course-showcase`: for **showing off your agents**
You can also follow the technical discussion channel:
- `smolagents`: for **discussion and support around the library**
If you're new to Discord, we've prepared a Discord 101 guide for reference; see the [next section](discord101)
### Step 3: Follow the Hugging Face Agents Course Organization
Follow the course organization to get **the latest course materials, updates, and important announcements** in real time
👉 Visit the <a href="https://huggingface.co/agents-course" target="_blank">course page</a> and click **Follow**
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/hf_course_follow.gif" alt="How to follow" width="100%"/>
### Step 4: Help Spread the Word About the Course
Two ways to support the course:
1. Give the course repository a ⭐ on its <a href="https://github.com/huggingface/agents-course" target="_blank">GitHub project page</a>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/please_star.gif" alt="Star the repo"/>
2. Share your learning journey: use the dedicated image to announce on social media **that you're taking the course**
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png">
Click 👉 [here](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/share.png?download=true) to download the image
### Step 5: Run Models Locally with Ollama (in case you run into credit limits)
1. Install Ollama
Follow the <a href="https://ollama.com/download" target="_blank">official instructions</a> to install Ollama
2. Pull a model locally
```bash
ollama pull qwen2:7b # Check the Ollama website for more model info
```
3. Start Ollama in the background (in one terminal)
```bash
ollama serve
```
If you run into the error `listen tcp 127.0.0.1:11434: bind: address already in use`, you can use `sudo lsof -i :11434` to identify the process ID (PID) currently using that port. If that process is `ollama`, the installation script above has most likely already started the Ollama service, so you can skip this command and use Ollama directly.
4. Use `LiteLLMModel` instead of `InferenceClientModel`
To use the `LiteLLMModel` module in `smolagents`, install it with `pip`.
```bash
pip install smolagents[litellm]
```
```python
from smolagents import LiteLLMModel

model = LiteLLMModel(
    model_id="ollama_chat/qwen2:7b",  # Or try other Ollama-supported models
    api_base="http://127.0.0.1:11434",  # Default local Ollama server address
    num_ctx=8192,
)
```
5. Why does this work?
- Ollama exposes an OpenAI-compatible API at `http://localhost:11434` for serving local models.
- `LiteLLMModel` is built to communicate with any model that supports the OpenAI chat/completion API format.
- This means you can seamlessly swap `InferenceClientModel` for `LiteLLMModel` with no other code changes, making it a drop-in solution.
Congratulations! 🎉 **You've completed onboarding**! You're now ready to begin your journey into agents. Have fun exploring!
Keep up your passion for learning, and keep shining 🤗
| agents-course/units/zh-CN/unit0/onboarding.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit0/onboarding.mdx",
"repo_id": "agents-course",
"token_count": 2404
} | 19 |
# What are Large Language Models (LLMs)?
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-1.jpg" alt="Unit 1 planning"/>
In the previous section we learned that every agent needs an AI model at its core, and that LLMs are the most common type of AI model for this purpose.
Now we will learn what LLMs are and how they power agents.
This section offers a concise technical explanation of what LLMs are used for. If you want to dive deeper, you can check out our <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">free Natural Language Processing course</a>.
## What is a Large Language Model?
An LLM is a type of AI model that excels at understanding and generating human language. Trained on vast amounts of text data, they learn patterns, structure, and even nuance in language. These models typically consist of tens of millions of parameters or more.
Most LLMs nowadays are built on the Transformer architecture, a deep learning architecture based on the "attention" algorithm that has gained significant interest since the release of BERT from Google in 2018.
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/transformer.jpg" alt="Transformer"/>
<figcaption>The original Transformer architecture looked like this, with an encoder on the left and a decoder on the right.
</figcaption>
</figure>
There are three types of Transformers:
1. **Encoders**
An encoder-based Transformer takes text (or other data) as input and outputs a dense representation (or embedding) of that text.
- **Example**: BERT from Google
- **Use cases**: text classification, semantic search, named entity recognition
- **Typical size**: millions of parameters
2. **Decoders**
A decoder-based Transformer focuses on **generating new tokens one at a time to complete a sequence**.
- **Example**: Llama from Meta
- **Use cases**: text generation, chatbots, code generation
- **Typical size**: billions (in the US sense, i.e., 10^9) of parameters
3. **Seq2Seq (Encoder–Decoder)**
A sequence-to-sequence Transformer _combines_ an encoder and a decoder. The encoder first processes the input sequence into a contextual representation, then the decoder generates the output sequence.
- **Example**: T5, BART
- **Use cases**: translation, summarization, paraphrasing
- **Typical size**: millions of parameters
Although LLMs come in various shapes, they are typically decoder-based models with billions of parameters. Here are some of the best-known LLMs:
| **Model** | **Provider** |
|-----------------------------------|------------------------------------------|
| **Deepseek-R1** | DeepSeek |
| **GPT4** | OpenAI |
| **Llama 3** | Meta (Facebook AI Research) |
| **SmolLM2** | Hugging Face |
| **Gemma** | Google |
| **Mistral** | Mistral |
The underlying principle of an LLM is simple yet remarkably effective: **its objective is to predict the next token, given a sequence of previous tokens**. A "token" is the unit of information an LLM works with. You can think of a "token" as if it were a "word", but for efficiency reasons LLMs don't use whole words directly.
For example, while English has an estimated 600,000 words, an LLM might have a vocabulary of around 32,000 tokens (as is the case with Llama 2). Tokenization often works on sub-word units that can be combined.
For instance, consider how the tokens "interest" and "ing" can be combined to form "interesting", or "ed" can be appended to form "interested".
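To make this concrete, here is a minimal sketch (assuming the `transformers` library is installed) that tokenizes related words with the SmolLM2 tokenizer discussed later in this section; the exact splits depend on the model's vocabulary:
```python
# Sub-word tokenization: related words often share common token pieces.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
for word in ["interest", "interesting", "interested"]:
    print(word, "->", tokenizer.tokenize(word))
```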
You can experiment with different tokenizers in the interactive playground below:
<iframe
src="https://agents-course-the-tokenizer-playground.static.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
Each LLM has some **special tokens** specific to the model. The LLM uses these tokens to open and close the structured components of its generation, for example to indicate the start or end of a sequence, message, or response. Moreover, the input prompts that we pass to the model are also structured with special tokens. The most important of those is the **End of Sequence (EOS) token**.
The forms of special tokens vary greatly across model providers.
The table below illustrates that diversity:
<table>
<thead>
<tr>
<th><strong>Model</strong></th>
<th><strong>Provider</strong></th>
<th><strong>EOS Token</strong></th>
<th><strong>Functionality</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>GPT4</strong></td>
<td>OpenAI</td>
<td><code><|endoftext|></code></td>
<td>End of message text</td>
</tr>
<tr>
<td><strong>Llama 3</strong></td>
<td>Meta (Facebook AI Research)</td>
<td><code><|eot_id|></code></td>
<td>End of sequence</td>
</tr>
<tr>
<td><strong>Deepseek-R1</strong></td>
<td>DeepSeek</td>
<td><code><|end_of_sentence|></code></td>
<td>End of message text</td>
</tr>
<tr>
<td><strong>SmolLM2</strong></td>
<td>Hugging Face</td>
<td><code><|im_end|></code></td>
<td>End of instruction or message</td>
</tr>
<tr>
<td><strong>Gemma</strong></td>
<td>Google</td>
<td><code><end_of_turn></code></td>
<td>End of conversation turn</td>
</tr>
</tbody>
</table>
<Tip>
We don't expect you to memorize these special tokens, but it is important to appreciate their diversity and the role they play in the text generation of LLMs. If you want to know more about special tokens, you can check out the configuration of the model in its Hub repository. For example, you can find the special tokens of the SmolLM2 model in its [tokenizer_config.json file](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct/blob/main/tokenizer_config.json).
</Tip>
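Rather than reading the configuration file by hand, you can also inspect a model's special tokens programmatically. Here is a small sketch, assuming `transformers` is installed; the values come straight from the tokenizer configuration on the Hub:
```python
# Inspect the special tokens declared by a model's tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
print(tok.special_tokens_map)  # e.g. includes 'eos_token': '<|im_end|>'
print(tok.eos_token_id)        # the integer id of the EOS token
```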
## Understanding next token prediction
LLMs are said to be **autoregressive**, meaning that **the output from one pass becomes the input for the next one**. This loop continues until the model predicts the next token to be the EOS token, at which point the model can stop.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AutoregressionSchema.gif" alt="Visual GIF of autoregressive decoding" width="60%">
In other words, an LLM will decode text until it reaches the EOS. But what happens during a single decoding loop?
While the full process can be quite technical for the purpose of learning about agents, here's a brief overview:
- Once the input text is **tokenized**, the model computes a representation of the sequence that captures information about the meaning and the position of each token in the input sequence.
- This representation goes into the model, which outputs scores that rank the likelihood of each token in its vocabulary as being the next one in the sequence.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/DecodingFinal.gif" alt="Visual GIF of decoding" width="60%">
Based on these scores, we have multiple strategies to select the tokens to complete the sentence.
- The easiest decoding strategy would be to always take the token with the maximum score.
You can interact with the decoding process yourself with SmolLM2 in this Space (remember, it decodes until reaching an **EOS** token, which is **<|im_end|>** for this model):
<iframe
src="https://agents-course-decoding-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
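To see what the greedy strategy looks like in code, here is a minimal sketch, assuming `torch` and `transformers` are installed and the SmolLM2 checkpoint is accessible. At each step the sequence is fed back in and the highest-scoring token is appended, until the model emits its EOS token:
```python
# Greedy decoding: always append the token with the maximum score.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

name = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name)

input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids
with torch.no_grad():
    for _ in range(20):
        logits = model(input_ids).logits      # scores over the whole vocabulary
        next_id = logits[0, -1].argmax()      # greedy: pick the maximum score
        if next_id.item() == tokenizer.eos_token_id:
            break                             # stop once EOS is predicted
        input_ids = torch.cat([input_ids, next_id.view(1, 1)], dim=1)
print(tokenizer.decode(input_ids[0]))
```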
- But there are more advanced decoding strategies. For example, *beam search* explores multiple candidate sequences to find the one with the maximum total score, even if some individual tokens within it have lower scores.
<iframe
src="https://agents-course-beam-search-visualizer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
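For comparison, here is a hedged sketch of beam search using the high-level `generate` API from `transformers`, reusing the `model`, `tokenizer`, and `input_ids` from the greedy sketch above:
```python
# Beam search keeps `num_beams` candidate sequences alive at each step and
# returns the complete sequence with the best overall score.
output = model.generate(input_ids, num_beams=4, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```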
If you want to know more about decoding, you can take a look at the [NLP course](https://huggingface.co/learn/nlp-course).
## Attention is all you need
A crucial aspect of the Transformer architecture is the **attention mechanism**. When predicting the next word, not every word in a sentence is equally important; words like "France" and "capital" in the sentence *"The capital of France is ..."* carry the most meaning.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AttentionSceneFinal.gif" alt="Visual GIF of attention" width="60%">
This process of identifying the most relevant words to predict the next token has proven to be incredibly effective.
Although the basic principle of LLMs, predicting the next token, has remained consistent since GPT-2, there have been significant advancements in scaling neural networks and in making the attention mechanism work over longer and longer sequences.
If you have interacted with LLMs, you're probably familiar with the term *context length*, which refers to the maximum number of tokens the LLM can process, and the maximum *attention span* it has.
## Prompting the LLM is important
Considering that the only job of an LLM is to predict the next token by looking at every input token, and to choose which tokens are "important", the wording of your input sequence matters a great deal.
The input sequence you provide an LLM is called a *prompt*. Careful design of the prompt makes it easier **to guide the generation of the LLM toward the desired output**.
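As a small illustration (a sketch reusing the SmolLM2 tokenizer from the earlier examples), chat models structure the prompt for you with their special tokens through a *chat template*:
```python
# The chat template wraps the user message in the model's special tokens,
# producing the exact prompt string the model was trained to expect.
messages = [{"role": "user", "content": "Name the capital of France."}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```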
## How are LLMs trained?
LLMs are trained on large datasets of text, where they learn to predict the next word in a sequence through a self-supervised or masked language modeling objective.
From this unsupervised learning, the model learns the structure of the language and the **underlying patterns in text, allowing the model to generalize to unseen data**.
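Formally (a standard formulation, not specific to any one model), this next-token objective amounts to minimizing the cross-entropy of the training text:

\\[
\mathcal{L}(\theta) = -\sum_{t} \log p_\theta(x_t \mid x_{<t})
\\]

where \\(x_t\\) is the token at position \\(t\\) and \\(x_{<t}\\) denotes all the tokens that precede it.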
After this initial *pre-training*, LLMs can then be fine-tuned on a supervised learning objective to perform specific tasks. For example, some models are trained for conversational structures or tool usage, while others focus on classification or code generation.
## How can I use LLMs?
You have two main options:
1. **Run locally** (if you have sufficient hardware).
2. **Use a cloud/API** (e.g., via the Hugging Face Serverless Inference API).
Throughout this course, we will primarily use models via APIs on the Hugging Face Hub. Later on, we will explore how to run these models locally on your hardware.
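As a hedged sketch of the API route, assuming `huggingface_hub` is installed, a Hugging Face token is configured, and the Llama model linked at the end of this page is accessible to your account:
```python
# Call a hosted model through Hugging Face's serverless Inference API.
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Llama-3.2-3B-Instruct")
out = client.chat_completion(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=32,
)
print(out.choices[0].message.content)
```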
## How are LLMs used in AI agents?
LLMs are a key component of AI agents, **providing the foundation for understanding and generating human language**.
They can interpret user instructions, maintain context in conversations, define a plan, and decide which tools to use.
We will explore these steps in more detail in this unit, but for now, what you need to understand is that the LLM is **the brain of the agent**.
---
That was a lot of information! We've covered the basics of what LLMs are, how they function, and their role in powering AI agents.
If you'd like to dive even deeper into the fascinating world of language models and natural language processing, don't hesitate to check out our <a href="https://huggingface.co/learn/nlp-course/chapter1/1" target="_blank">free NLP course</a>.
Now that we understand how LLMs work, it's time to see **how LLMs structure their generations in a conversational context**.
To run <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit1/dummy_agent_library.ipynb" target="_blank">this notebook</a>, **you need a Hugging Face token** that you can get from <a href="https://hf.co/settings/tokens" target="_blank"> https://hf.co/settings/tokens </a>.
For more information on how to run Jupyter Notebooks, check out <a href="https://huggingface.co/docs/hub/notebooks">Jupyter Notebooks on the Hugging Face Hub</a>.
You also need to request access to <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct" target="_blank">the Meta Llama models</a>.
| agents-course/units/zh-CN/unit1/what-are-llms.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit1/what-are-llms.mdx",
"repo_id": "agents-course",
"token_count": 7759
} | 20 |
# Quick self-check (ungraded) [[quiz2]]
What?! Another quiz? We know, we know... 😅 But this short, ungraded quiz is here to **help you reinforce the key concepts you've just learned**.
This quiz covers agentic workflows and interactions: core components for building effective AI agents.
### Q1: What is the main purpose of AgentWorkflow in LlamaIndex?
<Question
choices={[
{
text: "To run one or more agents equipped with tools",
explain: "Correct, AgentWorkflow is the main way to quickly create a system with one or more agents.",
correct: true
},
{
text: "To create a single data-query agent with no memory",
explain: "Incorrect, AgentWorkflow can do far more than that; QueryEngine is the component for simple data queries.",
},
{
text: "To automatically build tools for agents",
explain: "AgentWorkflow is not responsible for building tools; that is the developer's job.",
},
{
text: "To manage agent memory and state",
explain: "Managing memory and state is not the primary purpose of AgentWorkflow.",
}
]}
/>
---
### Q2: Which object is used to keep track of a workflow's state?
<Question
choices={[
{
text: "State",
explain: "State is not the correct object for managing workflow state.",
},
{
text: "Context",
explain: "Context is the correct object for keeping track of workflow state.",
correct: true
},
{
text: "WorkflowState",
explain: "WorkflowState is not the correct object.",
},
{
text: "Management",
explain: "Management is not a valid object for workflow state management.",
}
]}
/>
---
### Q3: Which method should you use if you want an agent to remember previous interactions?
<Question
choices={[
{
text: "run(query_str)",
explain: ".run(query_str) does not maintain conversation history.",
},
{
text: "chat(query_str, ctx=ctx)",
explain: "chat() is not a valid workflow method.",
},
{
text: "interact(query_str)",
explain: "interact() is not a valid method for agent interaction.",
},
{
text: "run(query_str, ctx=ctx)",
explain: "By passing in and maintaining the context, we can preserve state!",
correct: true
}
]}
/>
---
### Q4: What is the key characteristic of Agentic RAG?
<Question
choices={[
{
text: "It can only answer questions in a RAG workflow using document-based tools",
explain: "Agentic RAG can use different tools, including document-based ones.",
},
{
text: "It answers questions automatically without using tools, like a chatbot",
explain: "Agentic RAG does use tools to answer questions.",
},
{
text: "It can decide to use any tool (including RAG tools) to answer questions",
explain: "Agentic RAG has the flexibility to use different tools to answer questions.",
correct: true
},
{
text: "It only works with function-calling agents",
explain: "Agentic RAG is not limited to function-calling agents.",
}
]}
/>
---
Got all that? Great! Now let's **do a quick recap of this unit!**
| agents-course/units/zh-CN/unit2/llama-index/quiz2.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit2/llama-index/quiz2.mdx",
"repo_id": "agents-course",
"token_count": 1632
} | 21 |
# Creating Your Gala Agent
Now that we've built all the necessary components for Alfred, it's time to bring them together into a complete agent that can help host our extravagant gala.
In this section, we'll combine the guest-information retrieval, web search, weather information, and Hub statistics tools into a single powerful agent.
## Assembling Alfred: The Complete Agent
Instead of re-implementing all the tools we created in earlier sections, we can simply import them from the saved `tools.py` and `retriever.py` modules.
<Tip>
If you haven't implemented these tools yet, go back to the <a href="./tools">tools</a> and <a href="./invitees">retriever</a> sections to implement them, and add them to the `tools.py` and `retriever.py` files.
</Tip>
Let's import the necessary libraries and tools from the previous sections:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
# Import necessary libraries
import random
from smolagents import CodeAgent, InferenceClientModel

# Import our custom tools from their modules
from tools import DuckDuckGoSearchTool, WeatherInfoTool, HubStatsTool
from retriever import load_guest_dataset
```
Now, let's combine all these tools into a single agent:
```python
# Initialize the Hugging Face model
model = InferenceClientModel()

# Initialize the web search tool
search_tool = DuckDuckGoSearchTool()

# Initialize the weather tool
weather_info_tool = WeatherInfoTool()

# Initialize the Hub stats tool
hub_stats_tool = HubStatsTool()

# Load the guest dataset and initialize the guest info tool
guest_info_tool = load_guest_dataset()

# Create Alfred with all the tools
alfred = CodeAgent(
    tools=[guest_info_tool, weather_info_tool, hub_stats_tool, search_tool],
    model=model,
    add_base_tools=True,  # Add any additional base tools
    planning_interval=3   # Enable planning every 3 steps
)
```
</hfoption>
<hfoption id="llama-index">
```python
# Import necessary libraries
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from tools import search_tool, weather_info_tool, hub_stats_tool
from retriever import guest_info_tool
```
Now, let's combine all these tools into a single agent:
```python
# Initialize the Hugging Face model
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# Create Alfred with all the tools
alfred = AgentWorkflow.from_tools_or_functions(
    [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool],
    llm=llm,
)
```
</hfoption>
<hfoption id="langgraph">
```python
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from tools import DuckDuckGoSearchRun, weather_info_tool, hub_stats_tool
from retriever import guest_info_tool
```
Now, let's combine all these tools into a single agent:
```python
# Initialize the web search tool
search_tool = DuckDuckGoSearchRun()

# Generate the chat interface, including the tools
llm = HuggingFaceEndpoint(
    repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
)
chat = ChatHuggingFace(llm=llm, verbose=True)
tools = [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool]
chat_with_tools = chat.bind_tools(tools)
# Generate the AgentState and Agent graph
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]

def assistant(state: AgentState):
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }

## Build the graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to the tools node
    # Otherwise, provide a direct response
    tools_condition,
)
builder.add_edge("tools", "assistant")
alfred = builder.compile()
```
</hfoption>
</hfoptions>
Your agent is now ready to use!
## Using Alfred: End-to-End Examples
Now that Alfred is fully equipped with all the necessary tools, let's see how he can help with various tasks during the gala.
### Example 1: Finding Guest Information
Let's see how Alfred can help us retrieve guest information:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "Tell me about 'Lady Ada Lovelace'"
response = alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
Based on the information I retrieved, Lady Ada Lovelace is an esteemed mathematician and friend. She is renowned for her pioneering work in mathematics and computing, and is often celebrated as the first computer programmer due to her work on Charles Babbage's Analytical Engine. Her email is ada.lovelace@example.com.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "Tell me about Lady Ada Lovelace. What's her background?"
response = await alfred.run(query)
print("🎩 Alfred's Response:")
print(response.response.blocks[0].text)
```
Expected output:
```
🎩 Alfred's Response:
Lady Ada Lovelace was an English mathematician and writer, best known for her work on Charles Babbage's Analytical Engine. She was the first to recognize that the machine had applications beyond pure calculation.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "Tell me about 'Lady Ada Lovelace'"})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
Expected output:
```
🎩 Alfred's Response:
Ada Lovelace, full name Augusta Ada King, Countess of Lovelace, was an English mathematician and writer. Born on December 10, 1815, and passing away on November 27, 1852, she is famous for her work on Charles Babbage's Analytical Engine, a proposed mechanical general-purpose computer. Ada Lovelace is celebrated as the first computer programmer because she created a program for the Analytical Engine in 1843. She recognized that the machine could be used for more than mere calculation, a visionary insight that was extremely rare at the time. Her contributions to the field of computer science laid the groundwork for future developments. Ada Lovelace Day, held every October, commemorates her pioneering work in technology and inspires women in STEM fields.
```
</hfoption>
</hfoptions>
### Example 2: Checking the Weather for the Fireworks
Let's see how Alfred can help us with the weather:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "What's the weather like in Paris tonight? Will it be suitable for our fireworks display?"
response = alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output (may vary due to randomness):
```
🎩 Alfred's Response:
I've checked the weather in Paris for you. It is currently clear with a temperature of 25°C. These conditions are perfect for tonight's fireworks display. The clear skies will provide excellent visibility for the spectacular show, and the comfortable temperature will ensure the guests can enjoy the outdoor event.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "What's the weather like in Paris tonight? Will it be suitable for our fireworks display?"
response = await alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
The weather in Paris tonight is rainy with a temperature of 15°C. Given the rain, it may not be suitable for a fireworks display.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "What's the weather like in Paris tonight? Will it be suitable for our fireworks display?"})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
Expected output:
```
🎩 Alfred's Response:
The weather in Paris tonight is rainy with a temperature of 15°C, which may not be suitable for your fireworks display.
```
</hfoption>
</hfoptions>
### Example 3: Impressing AI Researchers
Let's see how Alfred can help us interact with AI researchers:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "One of our guests is from Qwen. What can you tell me about their most popular model?"
response = alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
The most popular Qwen model is Qwen/Qwen2.5-VL-7B-Instruct with 3,313,345 downloads.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "One of our guests is from Google. What can you tell me about their most popular model?"
response = await alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
The most popular Google model on the Hugging Face Hub is google/electra-base-discriminator with 28,546,752 downloads.
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages": "One of our guests is from Qwen. What can you tell me about their most popular model?"})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
Expected output:
```
🎩 Alfred's Response:
The most downloaded Qwen model is Qwen/Qwen2.5-VL-7B-Instruct with 3,313,345 downloads.
```
</hfoption>
</hfoptions>
### Example 4: Combining Multiple Tools
Let's see how Alfred can help us prepare for a conversation with Dr. Nikola Tesla:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
query = "I need to speak with Dr. Nikola Tesla about recent advancements in wireless energy. Can you help me prepare for this conversation?"
response = alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
I've gathered information to help you prepare for your conversation with Dr. Nikola Tesla.
Guest Information:
Name: Dr. Nikola Tesla
Relation: old friend from university days
Description: Dr. Nikola Tesla is an old friend from your university days. He recently patented a new wireless energy transmission system and would be delighted to discuss it with you. Just remember he's passionate about pigeons, so that might make for good small talk.
Email: nikola.tesla@gmail.com
Recent Advancements in Wireless Energy:
Based on a web search, here are some recent developments in wireless energy transmission:
1. Researchers have made progress in long-range wireless power transmission using focused electromagnetic waves
2. Several companies are developing resonant inductive coupling technologies for consumer electronics
3. There are new applications for charging electric vehicles without physical connections
Conversation Starters:
1. "I'd love to hear about your new patent on wireless energy transmission. How does it compare to your original concepts from our university days?"
2. "Have you seen the recent developments in resonant inductive coupling for consumer electronics? What do you think of their approach?"
3. "How are your pigeons doing? I remember how fascinated you were with them."
This should give you plenty to discuss with Dr. Tesla, while demonstrating your knowledge of his interests and the latest developments in his field.
```
</hfoption>
<hfoption id="llama-index">
```python
query = "I need to speak with Dr. Nikola Tesla about recent advancements in wireless energy. Can you help me prepare for this conversation?"
response = await alfred.run(query)
print("🎩 Alfred's Response:")
print(response)
```
Expected output:
```
🎩 Alfred's Response:
Here are some recent advancements in wireless energy that may be useful for your conversation with Dr. Nikola Tesla:
1. **Advances and Challenges in Wireless Power Transfer**: This article discusses the evolution of wireless power transfer (WPT) from conventional wired methods to modern applications, including space-based solar power stations. It highlights the early use of microwave technology and the current demand driven by the rise of electronic devices.
2. **Recent Advances in Wireless Energy Transfer Technologies for Body-Interfaced Electronics**: This article explores wireless energy transfer (WET) as a solution for powering devices without batteries or lead wires, discussing its advantages and potential applications.
3. **Wireless Power Transfer and Energy Harvesting: Current Status and Future Trends**: This article provides an overview of recent advances in wireless power supply methods, including energy harvesting and wireless power transfer, presenting several promising applications and discussing future trends in the field.
4. **Wireless Power Transfer: Applications, Challenges, Barriers**
```
</hfoption>
<hfoption id="langgraph">
```python
response = alfred.invoke({"messages":"I need to speak with 'Dr. Nikola Tesla' about recent advancements in wireless energy. Can you help me prepare for this conversation?"})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
Expected output:
```
Based on the information provided, here are the key points to prepare for a conversation with 'Dr. Nikola Tesla' about recent advancements in wireless energy:
1. **Wireless Power Transmission (WPT)**: Discuss how it revolutionizes energy transfer by eliminating the need for cables, using mechanisms such as inductive and resonant coupling
2. **Advancements in Wireless Charging**: Highlight improvements in efficiency, faster charging speeds, and the rise of Qi/Qi2-certified solutions
3. **5G-Advanced Innovations and the NearLink Protocol**: Technologies that boost the speed, security, and efficiency of wireless networks, which can support advanced wireless energy applications
4. **AI/ML at the Edge**: Discuss how AI relies on wireless networks to bring intelligence to the edge, enhancing smart home automation
5. **The Matter Standard and Security Enhancements**: Key innovations driving connectivity efficiency and security improvements for IoT devices
6. **Breakthroughs in Wireless Charging Technology**: Including recent research results from institutions such as Incheon National University
```
</hfoption>
</hfoptions>
## Advanced Feature: Conversation Memory
To make Alfred even more helpful during the gala, we can enable conversation memory so that he remembers previous interactions:
<hfoptions id="agents-frameworks">
<hfoption id="smolagents">
```python
# Create Alfred with conversation memory
alfred_with_memory = CodeAgent(
    tools=[guest_info_tool, weather_info_tool, hub_stats_tool, search_tool],
    model=model,
    add_base_tools=True,
    planning_interval=3
)
# First interaction
response1 = alfred_with_memory.run("Tell me about Lady Ada Lovelace.")
print("🎩 Alfred's First Response:")
print(response1)
# Second interaction (referencing the first)
response2 = alfred_with_memory.run("What projects is she currently working on?", reset=False)
print("🎩 Alfred's Second Response:")
print(response2)
```
</hfoption>
<hfoption id="llama-index">
```python
from llama_index.core.workflow import Context
alfred = AgentWorkflow.from_tools_or_functions(
    [guest_info_tool, search_tool, weather_info_tool, hub_stats_tool],
    llm=llm
)

# Remembering state
ctx = Context(alfred)

# First interaction
response1 = await alfred.run("Tell me about Lady Ada Lovelace.", ctx=ctx)
print("🎩 Alfred's First Response:")
print(response1)

# Second interaction (referencing the first)
response2 = await alfred.run("What projects is she currently working on?", ctx=ctx)
print("🎩 Alfred's Second Response:")
print(response2)
```
</hfoption>
<hfoption id="langgraph">
```python
# First interaction
response = alfred.invoke({"messages": [HumanMessage(content="Tell me about 'Lady Ada Lovelace'. What's her background and how is she related to me?")]})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
print()
# Second interaction (referencing the first)
response = alfred.invoke({"messages": response["messages"] + [HumanMessage(content="What projects is she currently working on?")]})
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
```
</hfoption>
</hfoptions>
Notice that none of these three agent frameworks integrates memory directly by default. Is there a particular reason for this design choice? 🧐
* smolagents: memory is not preserved across different execution runs; you must request it explicitly with `reset=False`
* LlamaIndex: requires explicitly adding a context object for memory management within a run
* LangGraph: offers options to retrieve historical messages, or to use a dedicated [MemorySaver](https://langchain-ai.github.io/langgraph/tutorials/introduction/#part-3-adding-memory-to-the-chatbot) component
## Conclusion
Congratulations! You've successfully built Alfred, a sophisticated agent equipped with multiple tools to help host the most extravagant gala of the century. Alfred can now:
1. Retrieve detailed information about guests
2. Check weather conditions to plan outdoor activities
3. Provide insights about influential AI builders and their models
4. Search the web for the latest information
5. Maintain conversation context with memory
With these capabilities, Alfred is ready to ensure your gala is a resounding success, impressing guests with personalized attention and real-time information.
| agents-course/units/zh-CN/unit3/agentic-rag/agent.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit3/agentic-rag/agent.mdx",
"repo_id": "agents-course",
"token_count": 8411
} | 22 |
# Creating apps
| candle/candle-book/src/apps/README.md/0 | {
"file_path": "candle/candle-book/src/apps/README.md",
"repo_id": "candle",
"token_count": 4
} | 23 |
# Advanced Cuda usage
| candle/candle-book/src/inference/cuda/README.md/0 | {
"file_path": "candle/candle-book/src/inference/cuda/README.md",
"repo_id": "candle",
"token_count": 6
} | 24 |
mod benchmarks;
use criterion::criterion_main;
criterion_main!(
benchmarks::affine::benches,
benchmarks::copy::benches,
benchmarks::conv_transpose2d::benches,
benchmarks::matmul::benches,
benchmarks::qmatmul::benches,
benchmarks::random::benches,
benchmarks::reduce::benches,
benchmarks::unary::benches,
benchmarks::where_cond::benches,
);
| candle/candle-core/benches/bench_main.rs/0 | {
"file_path": "candle/candle-core/benches/bench_main.rs",
"repo_id": "candle",
"token_count": 138
} | 25 |
//! Traits to Define Backend Behavior
//!
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
use crate::{CpuStorage, DType, Layout, Result, Shape};
pub trait BackendStorage: Sized {
type Device: BackendDevice;
fn try_clone(&self, _: &Layout) -> Result<Self>;
fn dtype(&self) -> DType;
fn device(&self) -> &Self::Device;
// Maybe this should return a Cow instead so that no copy is done on the cpu case.
fn to_cpu_storage(&self) -> Result<CpuStorage>;
fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self>;
fn powf(&self, _: &Layout, _: f64) -> Result<Self>;
fn elu(&self, _: &Layout, _: f64) -> Result<Self>;
fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self>;
fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self>;
fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self>;
fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self>;
fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self>;
fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self>;
fn conv1d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConv1D,
) -> Result<Self>;
fn conv_transpose1d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConvTranspose1D,
) -> Result<Self>;
fn conv2d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConv2D,
) -> Result<Self>;
fn conv_transpose2d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConvTranspose2D,
) -> Result<Self>;
fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>;
fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>;
fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self>;
fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self>;
fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self>;
fn scatter_set(
&mut self,
_: &Layout,
_: &Self,
_: &Layout,
_: &Self,
_: &Layout,
_: usize,
) -> Result<()>;
fn scatter_add_set(
&mut self,
_: &Layout,
_: &Self,
_: &Layout,
_: &Self,
_: &Layout,
_: usize,
) -> Result<()>;
fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self>;
fn index_add(
&self,
_: &Layout,
_: &Self,
_: &Layout,
_: &Self,
_: &Layout,
_: usize,
) -> Result<Self>;
fn matmul(
&self,
_: &Self,
_: (usize, usize, usize, usize),
_: &Layout,
_: &Layout,
) -> Result<Self>;
fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()>;
#[allow(clippy::too_many_arguments)]
// Similar to cudaMemcpy2D, though values are in elements and not in bytes.
fn copy2d(
&self,
_: &mut Self,
_d1: usize,
_d2: usize,
_src_stride1: usize,
_dst_stride1: usize,
_src_offset: usize,
_dst_offset: usize,
) -> Result<()>;
fn const_set(&mut self, _: crate::scalar::Scalar, _: &Layout) -> Result<()>;
}
pub trait BackendDevice: Sized + std::fmt::Debug + Clone {
type Storage: BackendStorage;
// TODO: Make the usize generic and part of a generic DeviceLocation.
fn new(_: usize) -> Result<Self>;
fn location(&self) -> crate::DeviceLocation;
fn same_device(&self, _: &Self) -> bool;
fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>;
/// # Safety
/// This function is unsafe as it doesn't initialize the underlying data store.
/// The caller should ensure that the data is properly initialized as early as possible
/// after this call.
unsafe fn alloc_uninit(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>;
fn storage_from_slice<T: crate::WithDType>(&self, _: &[T]) -> Result<Self::Storage>;
fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage>;
fn storage_from_cpu_storage_owned(&self, _: CpuStorage) -> Result<Self::Storage>;
fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>;
fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>;
fn set_seed(&self, _: u64) -> Result<()>;
/// Synchronize should block until all the operations on the device are completed.
fn synchronize(&self) -> Result<()>;
}
| candle/candle-core/src/backend.rs/0 | {
"file_path": "candle/candle-core/src/backend.rs",
"repo_id": "candle",
"token_count": 2241
} | 26 |
/// Helper functions to plug cuda kernels in candle.
use crate::{Layout, Result, WithDType};
pub use cudarc;
use cudarc::driver::{CudaSlice, DeviceRepr, ValidAsZeroBits};
use super::{CudaDevice, CudaError, WrapErr};
pub type S = super::CudaStorageSlice;
pub trait Map1 {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &Layout,
) -> Result<CudaSlice<T>>;
fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> {
let out = match s {
S::U8(s) => S::U8(self.f(s, d, l)?),
S::U32(s) => S::U32(self.f(s, d, l)?),
S::I64(s) => S::I64(self.f(s, d, l)?),
S::BF16(s) => S::BF16(self.f(s, d, l)?),
S::F16(s) => S::F16(self.f(s, d, l)?),
S::F32(s) => S::F32(self.f(s, d, l)?),
S::F64(s) => S::F64(self.f(s, d, l)?),
S::F8E4M3(s) => S::F8E4M3(self.f(s, d, l)?),
};
Ok(out)
}
}
pub trait Map2 {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
src1: &CudaSlice<T>,
layout1: &Layout,
src2: &CudaSlice<T>,
layout2: &Layout,
dev: &CudaDevice,
) -> Result<CudaSlice<T>>;
fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> {
let out = match (s1, s2) {
(S::U8(s1), S::U8(s2)) => S::U8(self.f(s1, l1, s2, l2, d)?),
(S::U32(s1), S::U32(s2)) => S::U32(self.f(s1, l1, s2, l2, d)?),
(S::I64(s1), S::I64(s2)) => S::I64(self.f(s1, l1, s2, l2, d)?),
(S::BF16(s1), S::BF16(s2)) => S::BF16(self.f(s1, l1, s2, l2, d)?),
(S::F16(s1), S::F16(s2)) => S::F16(self.f(s1, l1, s2, l2, d)?),
(S::F32(s1), S::F32(s2)) => S::F32(self.f(s1, l1, s2, l2, d)?),
(S::F64(s1), S::F64(s2)) => S::F64(self.f(s1, l1, s2, l2, d)?),
(S::F8E4M3(s1), S::F8E4M3(s2)) => S::F8E4M3(self.f(s1, l1, s2, l2, d)?),
_ => Err(CudaError::InternalError("dtype mismatch in binary op"))?,
};
Ok(out)
}
}
pub trait Map3 {
#[allow(clippy::too_many_arguments)]
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
src1: &CudaSlice<T>,
layout1: &Layout,
src2: &CudaSlice<T>,
layout2: &Layout,
src3: &CudaSlice<T>,
layout3: &Layout,
dev: &CudaDevice,
) -> Result<CudaSlice<T>>;
#[allow(clippy::too_many_arguments)]
fn map(
&self,
s1: &S,
l1: &Layout,
s2: &S,
l2: &Layout,
s3: &S,
l3: &Layout,
d: &CudaDevice,
) -> Result<S> {
let out = match (s1, s2, s3) {
(S::U8(s1), S::U8(s2), S::U8(s3)) => S::U8(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::U32(s1), S::U32(s2), S::U32(s3)) => S::U32(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::I64(s1), S::I64(s2), S::I64(s3)) => S::I64(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::BF16(s1), S::BF16(s2), S::BF16(s3)) => S::BF16(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::F16(s1), S::F16(s2), S::F16(s3)) => S::F16(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::F32(s1), S::F32(s2), S::F32(s3)) => S::F32(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::F64(s1), S::F64(s2), S::F64(s3)) => S::F64(self.f(s1, l1, s2, l2, s3, l3, d)?),
(S::F8E4M3(s1), S::F8E4M3(s2), S::F8E4M3(s3)) => {
S::F8E4M3(self.f(s1, l1, s2, l2, s3, l3, d)?)
}
_ => Err(CudaError::InternalError("dtype mismatch in ternary op"))?,
};
Ok(out)
}
}
pub trait Map2InPlace {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
dst: &mut CudaSlice<T>,
dst_l: &Layout,
src: &CudaSlice<T>,
src_l: &Layout,
dev: &CudaDevice,
) -> Result<()>;
fn map(
&self,
dst: &mut S,
dst_l: &Layout,
src: &S,
src_l: &Layout,
d: &CudaDevice,
) -> Result<()> {
match (dst, src) {
(S::U8(dst), S::U8(src)) => self.f(dst, dst_l, src, src_l, d),
(S::U32(dst), S::U32(src)) => self.f(dst, dst_l, src, src_l, d),
(S::I64(dst), S::I64(src)) => self.f(dst, dst_l, src, src_l, d),
(S::BF16(dst), S::BF16(src)) => self.f(dst, dst_l, src, src_l, d),
(S::F16(dst), S::F16(src)) => self.f(dst, dst_l, src, src_l, d),
(S::F32(dst), S::F32(src)) => self.f(dst, dst_l, src, src_l, d),
(S::F64(dst), S::F64(src)) => self.f(dst, dst_l, src, src_l, d),
(S::F8E4M3(dst), S::F8E4M3(src)) => self.f(dst, dst_l, src, src_l, d),
_ => Err(CudaError::InternalError("dtype mismatch in binary op"))?,
}
}
}
pub trait Map1Any {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &Layout,
wrap: W,
) -> Result<S>;
fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> {
let out = match s {
S::U8(s) => self.f(s, d, l, S::U8)?,
S::U32(s) => self.f(s, d, l, S::U32)?,
S::I64(s) => self.f(s, d, l, S::I64)?,
S::BF16(s) => self.f(s, d, l, S::BF16)?,
S::F16(s) => self.f(s, d, l, S::F16)?,
S::F32(s) => self.f(s, d, l, S::F32)?,
S::F64(s) => self.f(s, d, l, S::F64)?,
S::F8E4M3(s) => self.f(s, d, l, S::F8E4M3)?,
};
Ok(out)
}
}
pub trait Map2Any {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
src1: &CudaSlice<T>,
layout1: &Layout,
src2: &CudaSlice<T>,
layout2: &Layout,
dev: &CudaDevice,
) -> Result<S>;
fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> {
let out = match (s1, s2) {
(S::U8(s1), S::U8(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::U32(s1), S::U32(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::I64(s1), S::I64(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::BF16(s1), S::BF16(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::F16(s1), S::F16(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::F32(s1), S::F32(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::F64(s1), S::F64(s2)) => self.f(s1, l1, s2, l2, d)?,
(S::F8E4M3(s1), S::F8E4M3(s2)) => self.f(s1, l1, s2, l2, d)?,
_ => Err(CudaError::InternalError("dtype mismatch in binary op")).w()?,
};
Ok(out)
}
}
| candle/candle-core/src/cuda_backend/utils.rs/0 | {
"file_path": "candle/candle-core/src/cuda_backend/utils.rs",
"repo_id": "candle",
"token_count": 4127
} | 27 |
//! Just enough pickle support to be able to read PyTorch checkpoints.
// This hardcodes objects that are required for tensor reading, we may want to make this a bit more
// composable/tensor agnostic at some point.
use crate::{Context, DType, Error as E, Layout, Result, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};
use std::collections::HashMap;
use std::io::BufRead;
const VERBOSE: bool = false;
// https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/
#[repr(u8)]
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum OpCode {
// https://github.com/python/cpython/blob/ed25f097160b5cbb0c9a1f9a746d2f1bbc96515a/Lib/pickletools.py#L2123
Proto = 0x80,
Global = b'c',
BinPut = b'q',
LongBinPut = b'r',
EmptyTuple = b')',
Reduce = b'R',
Mark = b'(',
BinUnicode = b'X',
BinInt = b'J',
Tuple = b't',
BinPersId = b'Q',
BinInt1 = b'K',
BinInt2 = b'M',
Tuple1 = 0x85,
Tuple2 = 0x86,
Tuple3 = 0x87,
NewTrue = 0x88,
NewFalse = 0x89,
None = b'N',
BinGet = b'h',
LongBinGet = b'j',
SetItem = b's',
SetItems = b'u',
EmptyDict = b'}',
Dict = b'd',
Build = b'b',
Stop = b'.',
NewObj = 0x81,
EmptyList = b']',
BinFloat = b'G',
Append = b'a',
Appends = b'e',
Long1 = 0x8a,
}
// Avoid using FromPrimitive so as not to drag another dependency.
impl TryFrom<u8> for OpCode {
type Error = u8;
fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
match value {
0x80 => Ok(Self::Proto),
b'c' => Ok(Self::Global),
b'q' => Ok(Self::BinPut),
b'r' => Ok(Self::LongBinPut),
b')' => Ok(Self::EmptyTuple),
b'R' => Ok(Self::Reduce),
b'(' => Ok(Self::Mark),
b'X' => Ok(Self::BinUnicode),
b'J' => Ok(Self::BinInt),
b't' => Ok(Self::Tuple),
b'Q' => Ok(Self::BinPersId),
b'K' => Ok(Self::BinInt1),
b'M' => Ok(Self::BinInt2),
b'N' => Ok(Self::None),
0x85 => Ok(Self::Tuple1),
0x86 => Ok(Self::Tuple2),
0x87 => Ok(Self::Tuple3),
0x88 => Ok(Self::NewTrue),
0x89 => Ok(Self::NewFalse),
b'h' => Ok(Self::BinGet),
b'j' => Ok(Self::LongBinGet),
b's' => Ok(Self::SetItem),
b'u' => Ok(Self::SetItems),
b'}' => Ok(Self::EmptyDict),
b'd' => Ok(Self::Dict),
b'b' => Ok(Self::Build),
b'.' => Ok(Self::Stop),
0x81 => Ok(Self::NewObj),
b']' => Ok(Self::EmptyList),
b'G' => Ok(Self::BinFloat),
b'a' => Ok(Self::Append),
b'e' => Ok(Self::Appends),
0x8a => Ok(Self::Long1),
value => Err(value),
}
}
}
fn read_to_newline<R: BufRead>(r: &mut R) -> Result<Vec<u8>> {
let mut data: Vec<u8> = Vec::with_capacity(32);
r.read_until(b'\n', &mut data)?;
data.pop();
if data.last() == Some(&b'\r') {
data.pop();
}
Ok(data)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Object {
Class {
module_name: String,
class_name: String,
},
Int(i32),
Long(i64),
Float(f64),
Unicode(String),
Bool(bool),
None,
Tuple(Vec<Object>),
List(Vec<Object>),
Mark,
Dict(Vec<(Object, Object)>),
Reduce {
callable: Box<Object>,
args: Box<Object>,
},
Build {
callable: Box<Object>,
args: Box<Object>,
},
PersistentLoad(Box<Object>),
}
type OResult<T> = std::result::Result<T, Object>;
impl Object {
pub fn unicode(self) -> OResult<String> {
match self {
Self::Unicode(t) => Ok(t),
_ => Err(self),
}
}
pub fn reduce(self) -> OResult<(Self, Self)> {
match self {
Self::Reduce { callable, args } => Ok((*callable, *args)),
_ => Err(self),
}
}
pub fn none(self) -> OResult<()> {
match self {
Self::None => Ok(()),
_ => Err(self),
}
}
pub fn persistent_load(self) -> OResult<Self> {
match self {
Self::PersistentLoad(t) => Ok(*t),
_ => Err(self),
}
}
pub fn bool(self) -> OResult<bool> {
match self {
Self::Bool(t) => Ok(t),
_ => Err(self),
}
}
pub fn int(self) -> OResult<i32> {
match self {
Self::Int(t) => Ok(t),
_ => Err(self),
}
}
pub fn int_or_long(self) -> OResult<i64> {
match self {
Self::Int(t) => Ok(t as i64),
Self::Long(t) => Ok(t),
_ => Err(self),
}
}
pub fn tuple(self) -> OResult<Vec<Self>> {
match self {
Self::Tuple(t) => Ok(t),
_ => Err(self),
}
}
pub fn dict(self) -> OResult<Vec<(Self, Self)>> {
match self {
Self::Dict(t) => Ok(t),
_ => Err(self),
}
}
pub fn class(self) -> OResult<(String, String)> {
match self {
Self::Class {
module_name,
class_name,
} => Ok((module_name, class_name)),
_ => Err(self),
}
}
pub fn into_tensor_info(
self,
name: Self,
dir_name: &std::path::Path,
) -> Result<Option<TensorInfo>> {
let name = match name.unicode() {
Ok(name) => name,
Err(_) => return Ok(None),
};
let (callable, args) = match self.reduce() {
Ok(callable_args) => callable_args,
_ => return Ok(None),
};
let (callable, args) = match callable {
Object::Class {
module_name,
class_name,
} if module_name == "torch._tensor" && class_name == "_rebuild_from_type_v2" => {
let mut args = args.tuple()?;
let callable = args.remove(0);
let args = args.remove(1);
(callable, args)
}
Object::Class {
module_name,
class_name,
} if module_name == "torch._utils" && class_name == "_rebuild_parameter" => {
let mut args = args.tuple()?;
args.remove(0).reduce()?
}
_ => (callable, args),
};
match callable {
Object::Class {
module_name,
class_name,
} if module_name == "torch._utils" && class_name == "_rebuild_tensor_v2" => {}
_ => return Ok(None),
};
let (layout, dtype, file_path, storage_size) = rebuild_args(args)?;
Ok(Some(TensorInfo {
name,
dtype,
layout,
path: format!("{}/{}", dir_name.to_string_lossy(), file_path),
storage_size,
}))
}
}
impl TryFrom<Object> for String {
type Error = Object;
fn try_from(value: Object) -> std::result::Result<Self, Self::Error> {
match value {
Object::Unicode(s) => Ok(s),
other => Err(other),
}
}
}
impl TryFrom<Object> for usize {
type Error = Object;
fn try_from(value: Object) -> std::result::Result<Self, Self::Error> {
match value {
Object::Int(s) if s >= 0 => Ok(s as usize),
other => Err(other),
}
}
}
impl<T: TryFrom<Object, Error = Object>> TryFrom<Object> for Vec<T> {
type Error = Object;
fn try_from(value: Object) -> std::result::Result<Self, Self::Error> {
match value {
Object::Tuple(values) => {
// This does not return the appropriate value in the error case but instead returns
// the object related to the first error.
values
.into_iter()
.map(|v| T::try_from(v))
.collect::<std::result::Result<Vec<T>, Self::Error>>()
}
other => Err(other),
}
}
}
#[derive(Debug)]
pub struct Stack {
stack: Vec<Object>,
memo: HashMap<u32, Object>,
}
impl Stack {
pub fn empty() -> Self {
Self {
stack: Vec::with_capacity(512),
memo: HashMap::new(),
}
}
pub fn stack(&self) -> &[Object] {
self.stack.as_slice()
}
pub fn read_loop<R: BufRead>(&mut self, r: &mut R) -> Result<()> {
loop {
if self.read(r)? {
break;
}
}
Ok(())
}
pub fn finalize(mut self) -> Result<Object> {
self.pop()
}
fn push(&mut self, obj: Object) {
self.stack.push(obj)
}
fn pop(&mut self) -> Result<Object> {
match self.stack.pop() {
None => crate::bail!("unexpected empty stack"),
Some(obj) => Ok(obj),
}
}
// https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/#Pickle.OpCodes.BUILD
fn build(&mut self) -> Result<()> {
let args = self.pop()?;
let obj = self.pop()?;
let obj = match (obj, args) {
(Object::Dict(mut obj), Object::Dict(mut args)) => {
obj.append(&mut args);
Object::Dict(obj)
}
(obj, args) => Object::Build {
callable: Box::new(obj),
args: Box::new(args),
},
};
self.push(obj);
Ok(())
}
fn reduce(&mut self) -> Result<()> {
let args = self.pop()?;
let callable = self.pop()?;
#[allow(clippy::single_match)]
let reduced = match &callable {
Object::Class {
module_name,
class_name,
} => {
if module_name == "collections"
&& (class_name == "OrderedDict" || class_name == "defaultdict")
{
// TODO: have a separate ordered dict and a separate default dict.
Some(Object::Dict(vec![]))
} else {
None
}
}
_ => None,
};
let reduced = reduced.unwrap_or_else(|| Object::Reduce {
callable: Box::new(callable),
args: Box::new(args),
});
self.push(reduced);
Ok(())
}
fn last(&mut self) -> Result<&mut Object> {
match self.stack.last_mut() {
None => crate::bail!("unexpected empty stack"),
Some(obj) => Ok(obj),
}
}
fn memo_get(&self, id: u32) -> Result<Object> {
match self.memo.get(&id) {
None => crate::bail!("missing object in memo {id}"),
Some(obj) => {
// Maybe we should use refcounting rather than doing potential large clones here.
Ok(obj.clone())
}
}
}
fn memo_put(&mut self, id: u32) -> Result<()> {
let obj = self.last()?.clone();
self.memo.insert(id, obj);
Ok(())
}
fn persistent_load(&self, id: Object) -> Result<Object> {
Ok(Object::PersistentLoad(Box::new(id)))
}
fn new_obj(&self, class: Object, args: Object) -> Result<Object> {
Ok(Object::Reduce {
callable: Box::new(class),
args: Box::new(args),
})
}
fn pop_to_marker(&mut self) -> Result<Vec<Object>> {
let mut mark_idx = None;
for (idx, obj) in self.stack.iter().enumerate().rev() {
if obj == &Object::Mark {
mark_idx = Some(idx);
break;
}
}
match mark_idx {
Some(mark_idx) => {
let objs = self.stack.split_off(mark_idx + 1);
self.stack.pop();
Ok(objs)
}
None => {
crate::bail!("marker object not found")
}
}
}
pub fn read<R: BufRead>(&mut self, r: &mut R) -> Result<bool> {
let op_code = match OpCode::try_from(r.read_u8()?) {
Ok(op_code) => op_code,
Err(op_code) => {
crate::bail!("unknown op-code {op_code}")
}
};
// println!("op: {op_code:?}");
// println!("{:?}", self.stack);
match op_code {
OpCode::Proto => {
let version = r.read_u8()?;
if VERBOSE {
println!("proto {version}");
}
}
OpCode::Global => {
let module_name = read_to_newline(r)?;
let class_name = read_to_newline(r)?;
let module_name = String::from_utf8_lossy(&module_name).to_string();
let class_name = String::from_utf8_lossy(&class_name).to_string();
self.push(Object::Class {
module_name,
class_name,
})
}
OpCode::BinInt1 => {
let arg = r.read_u8()?;
self.push(Object::Int(arg as i32))
}
OpCode::BinInt2 => {
let arg = r.read_u16::<LittleEndian>()?;
self.push(Object::Int(arg as i32))
}
OpCode::BinInt => {
let arg = r.read_i32::<LittleEndian>()?;
self.push(Object::Int(arg))
}
OpCode::BinFloat => {
// Somehow floats are encoded using BigEndian whereas int types use LittleEndian.
// https://github.com/python/cpython/blob/0c80da4c14d904a367968955544dd6ae58c8101c/Lib/pickletools.py#L855
// https://github.com/pytorch/pytorch/blob/372d078f361e726bb4ac0884ac334b04c58179ef/torch/_weights_only_unpickler.py#L243
let arg = r.read_f64::<byteorder::BigEndian>()?;
self.push(Object::Float(arg))
}
OpCode::BinUnicode => {
let len = r.read_u32::<LittleEndian>()?;
let mut data = vec![0u8; len as usize];
r.read_exact(&mut data)?;
let data = String::from_utf8(data).map_err(E::wrap)?;
self.push(Object::Unicode(data))
}
OpCode::BinPersId => {
let id = self.pop()?;
let obj = self.persistent_load(id)?;
self.push(obj)
}
OpCode::Tuple => {
let objs = self.pop_to_marker()?;
self.push(Object::Tuple(objs))
}
OpCode::Tuple1 => {
let obj = self.pop()?;
self.push(Object::Tuple(vec![obj]))
}
OpCode::Tuple2 => {
let obj2 = self.pop()?;
let obj1 = self.pop()?;
self.push(Object::Tuple(vec![obj1, obj2]))
}
OpCode::Tuple3 => {
let obj3 = self.pop()?;
let obj2 = self.pop()?;
let obj1 = self.pop()?;
self.push(Object::Tuple(vec![obj1, obj2, obj3]))
}
OpCode::NewTrue => self.push(Object::Bool(true)),
OpCode::NewFalse => self.push(Object::Bool(false)),
OpCode::Append => {
let value = self.pop()?;
let pylist = self.last()?;
if let Object::List(d) = pylist {
d.push(value)
} else {
crate::bail!("expected a list, got {pylist:?}")
}
}
OpCode::Appends => {
let objs = self.pop_to_marker()?;
let pylist = self.last()?;
if let Object::List(d) = pylist {
d.extend(objs)
} else {
crate::bail!("expected a list, got {pylist:?}")
}
}
OpCode::SetItem => {
let value = self.pop()?;
let key = self.pop()?;
let pydict = self.last()?;
if let Object::Dict(d) = pydict {
d.push((key, value))
} else {
crate::bail!("expected a dict, got {pydict:?}")
}
}
OpCode::SetItems => {
let mut objs = self.pop_to_marker()?;
let pydict = self.last()?;
if let Object::Dict(d) = pydict {
if objs.len() % 2 != 0 {
crate::bail!("setitems: not an even number of objects")
}
while let Some(value) = objs.pop() {
let key = objs.pop().context("empty objs")?;
d.push((key, value))
}
} else {
crate::bail!("expected a dict, got {pydict:?}")
}
}
OpCode::None => self.push(Object::None),
OpCode::Stop => {
return Ok(true);
}
OpCode::Build => self.build()?,
OpCode::EmptyDict => self.push(Object::Dict(vec![])),
OpCode::Dict => {
let mut objs = self.pop_to_marker()?;
let mut pydict = vec![];
if objs.len() % 2 != 0 {
crate::bail!("setitems: not an even number of objects")
}
while let Some(value) = objs.pop() {
let key = objs.pop().context("empty objs")?;
pydict.push((key, value))
}
self.push(Object::Dict(pydict))
}
OpCode::Mark => self.push(Object::Mark),
OpCode::Reduce => self.reduce()?,
OpCode::EmptyTuple => self.push(Object::Tuple(vec![])),
OpCode::EmptyList => self.push(Object::List(vec![])),
OpCode::BinGet => {
let arg = r.read_u8()?;
let obj = self.memo_get(arg as u32)?;
self.push(obj)
}
OpCode::LongBinGet => {
let arg = r.read_u32::<LittleEndian>()?;
let obj = self.memo_get(arg)?;
self.push(obj)
}
OpCode::BinPut => {
let arg = r.read_u8()?;
self.memo_put(arg as u32)?
}
OpCode::LongBinPut => {
let arg = r.read_u32::<LittleEndian>()?;
self.memo_put(arg)?
}
OpCode::NewObj => {
let args = self.pop()?;
let class = self.pop()?;
let obj = self.new_obj(class, args)?;
self.push(obj)
}
OpCode::Long1 => {
let n_bytes = r.read_u8()?;
let mut v = 0;
// Decode the next n bytes in little endian
for i in 0..n_bytes {
v |= (r.read_u8()? as i64) << (i * 8);
}
self.push(Object::Long(v))
}
}
Ok(false)
}
}
impl From<Object> for E {
fn from(value: Object) -> Self {
E::Msg(format!("conversion error on {value:?}"))
}
}
// https://github.com/pytorch/pytorch/blob/4eac43d046ded0f0a5a5fa8db03eb40f45bf656e/torch/_utils.py#L198
// Arguments: storage, storage_offset, size, stride, requires_grad, backward_hooks
fn rebuild_args(args: Object) -> Result<(Layout, DType, String, usize)> {
let mut args = args.tuple()?;
let stride = Vec::<usize>::try_from(args.remove(3))?;
let size = Vec::<usize>::try_from(args.remove(2))?;
let offset = args.remove(1).int_or_long()? as usize;
let storage = args.remove(0).persistent_load()?;
let mut storage = storage.tuple()?;
let storage_size = storage.remove(4).int_or_long()? as usize;
let path = storage.remove(2).unicode()?;
let (_module_name, class_name) = storage.remove(1).class()?;
let dtype = match class_name.as_str() {
"FloatStorage" => DType::F32,
"DoubleStorage" => DType::F64,
"HalfStorage" => DType::F16,
"BFloat16Storage" => DType::BF16,
"ByteStorage" => DType::U8,
"LongStorage" => DType::I64,
other => {
crate::bail!("unsupported storage type {other}")
}
};
let layout = Layout::new(
crate::Shape::from(size),
stride,
offset * dtype.size_in_bytes(),
);
Ok((layout, dtype, path, storage_size))
}
#[derive(Debug, Clone)]
pub struct TensorInfo {
pub name: String,
pub dtype: DType,
pub layout: Layout,
pub path: String,
pub storage_size: usize,
}
/// Read the tensor info from a .pth file.
///
/// # Arguments
/// * `file` - The path to the .pth file.
/// * `verbose` - Whether to print debug information.
/// * `key` - Optional key to retrieve `state_dict` from the pth file.
pub fn read_pth_tensor_info<P: AsRef<std::path::Path>>(
file: P,
verbose: bool,
key: Option<&str>,
) -> Result<Vec<TensorInfo>> {
let file = std::fs::File::open(file)?;
let zip_reader = std::io::BufReader::new(file);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let zip_file_names = zip
.file_names()
.map(|f| f.to_string())
.collect::<Vec<String>>();
let mut tensor_infos = vec![];
for file_name in zip_file_names.iter() {
if !file_name.ends_with("data.pkl") {
continue;
}
let dir_name = std::path::PathBuf::from(file_name.strip_suffix(".pkl").context("no .pkl")?);
let reader = zip.by_name(file_name)?;
let mut reader = std::io::BufReader::new(reader);
let mut stack = Stack::empty();
stack.read_loop(&mut reader)?;
let obj = stack.finalize()?;
if VERBOSE || verbose {
println!("{obj:#?}");
}
let obj = match obj {
Object::Build { callable, args } => match *callable {
Object::Reduce { callable, args: _ } => match *callable {
Object::Class {
module_name,
class_name,
} if module_name == "__torch__" && class_name == "Module" => *args,
_ => continue,
},
_ => continue,
},
obj => obj,
};
// If key is provided, then we need to extract the state_dict from the object.
let obj = if let Some(key) = key {
if let Object::Dict(key_values) = obj {
key_values
.into_iter()
.find(|(k, _)| *k == Object::Unicode(key.to_owned()))
.map(|(_, v)| v)
.ok_or_else(|| E::Msg(format!("key {key} not found")))?
} else {
obj
}
} else {
obj
};
// If the object is a dict, then we can extract the tensor info from it.
// NOTE: We are assuming that the `obj` is state_dict by this stage.
if let Object::Dict(key_values) = obj {
for (name, value) in key_values.into_iter() {
match value.into_tensor_info(name, &dir_name) {
Ok(Some(tensor_info)) => tensor_infos.push(tensor_info),
Ok(None) => {}
Err(err) => eprintln!("skipping: {err:?}"),
}
}
}
}
Ok(tensor_infos)
}
/// Lazy tensor loader.
pub struct PthTensors {
tensor_infos: HashMap<String, TensorInfo>,
path: std::path::PathBuf,
// We do not store a zip reader as it needs mutable access to extract data. Instead we
// re-create a zip reader for each tensor.
}
impl PthTensors {
pub fn new<P: AsRef<std::path::Path>>(path: P, key: Option<&str>) -> Result<Self> {
let tensor_infos = read_pth_tensor_info(path.as_ref(), false, key)?;
let tensor_infos = tensor_infos
.into_iter()
.map(|ti| (ti.name.to_string(), ti))
.collect();
let path = path.as_ref().to_owned();
Ok(Self { tensor_infos, path })
}
pub fn tensor_infos(&self) -> &HashMap<String, TensorInfo> {
&self.tensor_infos
}
pub fn get(&self, name: &str) -> Result<Option<Tensor>> {
use std::io::Read;
let tensor_info = match self.tensor_infos.get(name) {
None => return Ok(None),
Some(tensor_info) => tensor_info,
};
// We hope that the file has not changed since first reading it.
let zip_reader = std::io::BufReader::new(std::fs::File::open(&self.path)?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut reader = zip.by_name(&tensor_info.path)?;
let is_fortran_contiguous = tensor_info.layout.is_fortran_contiguous();
let rank = tensor_info.layout.shape().rank();
// Reading the data is a bit tricky as it can be strided, for now only support the basic
// case and when the tensor is fortran contiguous.
if !tensor_info.layout.is_contiguous() && !is_fortran_contiguous {
crate::bail!(
"cannot retrieve non-contiguous tensors {:?}",
tensor_info.layout
)
}
let start_offset = tensor_info.layout.start_offset();
if start_offset > 0 {
std::io::copy(
&mut reader.by_ref().take(start_offset as u64),
&mut std::io::sink(),
)?;
}
let tensor = Tensor::from_reader(
tensor_info.layout.shape().clone(),
tensor_info.dtype,
&mut reader,
)?;
if rank > 1 && is_fortran_contiguous {
// Reverse the shape, e.g. Shape(2, 3, 4) -> Shape(4, 3, 2)
let shape_reversed: Vec<_> = tensor_info.layout.dims().iter().rev().cloned().collect();
let tensor = tensor.reshape(shape_reversed)?;
// Permute (transpose) the dimensions, e.g. Shape(4, 3, 2) -> Shape(2, 3, 4)
let dim_indices_reversed: Vec<_> = (0..rank).rev().collect();
let tensor = tensor.permute(dim_indices_reversed)?;
Ok(Some(tensor))
} else {
Ok(Some(tensor))
}
}
}
/// Read all the tensors from a PyTorch pth file with a given key.
///
/// # Arguments
/// * `path` - Path to the pth file.
/// * `key` - Optional key to retrieve `state_dict` from the pth file. Sometimes the pth file
/// contains multiple objects and the state_dict is the one we are interested in.
pub fn read_all_with_key<P: AsRef<std::path::Path>>(
path: P,
key: Option<&str>,
) -> Result<Vec<(String, Tensor)>> {
let pth = PthTensors::new(path, key)?;
let tensor_names = pth.tensor_infos.keys();
let mut tensors = Vec::with_capacity(tensor_names.len());
for name in tensor_names {
if let Some(tensor) = pth.get(name)? {
tensors.push((name.to_string(), tensor))
}
}
Ok(tensors)
}
/// Read all the tensors from a PyTorch pth file.
///
/// # Arguments
/// * `path` - Path to the pth file.
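/// # Example
/// ```no_run
/// // A minimal sketch; "model.pth" is a hypothetical checkpoint path.
/// let tensors = candle_core::pickle::read_all("model.pth")?;
/// for (name, tensor) in tensors {
///     println!("{name}: {:?}", tensor.shape());
/// }
/// # Ok::<(), candle_core::Error>(())
/// ```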
pub fn read_all<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<(String, Tensor)>> {
read_all_with_key(path, None)
}
| candle/candle-core/src/pickle.rs/0 | {
"file_path": "candle/candle-core/src/pickle.rs",
"repo_id": "candle",
"token_count": 14742
} | 28 |
use crate::{Result, Tensor};
use rayon::prelude::*;
#[derive(Debug, Clone, Copy)]
struct ArgSort {
asc: bool,
last_dim: usize,
}
impl ArgSort {
fn asort<T: crate::WithDType>(&self, vs: &[T], layout: &crate::Layout) -> Vec<u32> {
#[allow(clippy::uninit_vec)]
// Safety: indexes are set later in the parallelized section.
let mut sort_indexes = unsafe {
let el_count = layout.shape().elem_count();
let mut v = Vec::with_capacity(el_count);
v.set_len(el_count);
v
};
if self.asc {
sort_indexes
.par_chunks_exact_mut(self.last_dim)
.zip(vs.par_chunks_exact(self.last_dim))
.for_each(|(indexes, vs)| {
indexes
.iter_mut()
.enumerate()
.for_each(|(i, v)| *v = i as u32);
indexes.sort_by(|&i, &j| {
vs[i as usize]
.partial_cmp(&vs[j as usize])
.unwrap_or(std::cmp::Ordering::Greater)
})
});
} else {
sort_indexes
.par_chunks_exact_mut(self.last_dim)
.zip(vs.par_chunks_exact(self.last_dim))
.for_each(|(indexes, vs)| {
indexes
.iter_mut()
.enumerate()
.for_each(|(i, v)| *v = i as u32);
indexes.sort_by(|&j, &i| {
vs[i as usize]
.partial_cmp(&vs[j as usize])
.unwrap_or(std::cmp::Ordering::Greater)
})
});
}
sort_indexes
}
}
#[cfg(feature = "cuda")]
mod cuda {
use super::*;
use crate::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchConfig, ValidAsZeroBits,
};
use crate::cuda_backend::{kernel_name, kernels, CudaStorageSlice as S, WrapErr};
use crate::{CudaDevice, WithDType};
fn next_power_of_2(x: usize) -> usize {
let mut n = 1;
while n < x {
n *= 2
}
n
}
impl crate::cuda_backend::Map1Any for ArgSort {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &crate::Layout,
_wrap: W,
) -> Result<S> {
use cudarc::driver::PushKernelArg;
let slice = match layout.contiguous_offsets() {
None => crate::bail!("input has to be contiguous"),
Some((o1, o2)) => src.slice(o1..o2),
};
let elem_count = layout.shape().elem_count();
let dst = unsafe { dev.alloc::<u32>(elem_count)? };
let func = if self.asc {
dev.get_or_load_func(&kernel_name::<T>("asort_asc"), &kernels::SORT)?
} else {
dev.get_or_load_func(&kernel_name::<T>("asort_desc"), &kernels::SORT)?
};
let ncols = self.last_dim;
let nrows = elem_count / ncols;
let ncols_pad = next_power_of_2(ncols);
let cfg = LaunchConfig {
grid_dim: (nrows as u32, 1, 1),
block_dim: (ncols_pad as u32, 1, 1),
shared_mem_bytes: (ncols_pad * std::mem::size_of::<u32>()) as u32,
};
let stream = dev.cuda_stream();
let mut builder = stream.launch_builder(&func);
let ncols = ncols as i32;
let ncols_pad = ncols_pad as i32;
builder.arg(&slice).arg(&dst).arg(&ncols).arg(&ncols_pad);
unsafe { builder.launch(cfg) }.w()?;
Ok(S::U32(dst))
}
}
}
impl crate::CustomOp1 for ArgSort {
fn name(&self) -> &'static str {
"argsort"
}
fn cpu_fwd(
&self,
storage: &crate::CpuStorage,
layout: &crate::Layout,
) -> Result<(crate::CpuStorage, crate::Shape)> {
let sort_indexes = match storage {
crate::CpuStorage::U8(vs) => self.asort(vs, layout),
crate::CpuStorage::U32(vs) => self.asort(vs, layout),
crate::CpuStorage::I64(vs) => self.asort(vs, layout),
crate::CpuStorage::BF16(vs) => self.asort(vs, layout),
crate::CpuStorage::F16(vs) => self.asort(vs, layout),
crate::CpuStorage::F32(vs) => self.asort(vs, layout),
crate::CpuStorage::F64(vs) => self.asort(vs, layout),
crate::CpuStorage::F8E4M3(vs) => self.asort(vs, layout),
};
let sort_indexes = crate::CpuStorage::U32(sort_indexes);
Ok((sort_indexes, layout.shape().into()))
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &crate::CudaStorage,
layout: &crate::Layout,
) -> Result<(crate::CudaStorage, crate::Shape)> {
use crate::backend::BackendStorage;
use crate::cuda_backend::Map1Any;
let dev = storage.device();
let slice = self.map(&storage.slice, dev, layout)?;
let dst = crate::cuda_backend::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, layout.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
storage: &crate::MetalStorage,
layout: &crate::Layout,
) -> Result<(crate::MetalStorage, crate::Shape)> {
use crate::backend::BackendStorage;
use crate::DType;
let name = {
if self.asc {
match storage.dtype() {
DType::BF16 => "asort_asc_bf16",
DType::F16 => "asort_asc_f16",
DType::F32 => "asort_asc_f32",
DType::F64 => "asort_asc_f64",
DType::U8 => "asort_asc_u8",
DType::U32 => "asort_asc_u32",
DType::I64 => "asort_asc_i64",
DType::F8E4M3 => crate::bail!("Metal device does not yet support F8E4M3."),
}
} else {
match storage.dtype() {
DType::BF16 => "asort_desc_bf16",
DType::F16 => "asort_desc_f16",
DType::F32 => "asort_desc_f32",
DType::F64 => "asort_desc_f64",
DType::U8 => "asort_desc_u8",
DType::U32 => "asort_desc_u32",
DType::I64 => "asort_desc_i64",
DType::F8E4M3 => crate::bail!("Metal device does not yet support F8E4M3."),
}
}
};
let device = storage.device();
let kernels = device.kernels();
let command_buffer = device.command_buffer()?;
let el = layout.shape().elem_count();
let ncols = self.last_dim;
let nrows = el / ncols;
let src = crate::metal_backend::buffer_o(storage.buffer(), layout, storage.dtype());
let dst = device.new_buffer(el, DType::U32, "asort")?;
let mut ncols_pad = 1;
while ncols_pad < ncols {
ncols_pad *= 2;
}
candle_metal_kernels::call_arg_sort(
device.metal_device(),
&command_buffer,
kernels,
name,
nrows,
ncols,
ncols_pad,
src,
&dst,
)
.map_err(crate::Error::wrap)?;
let dst = crate::MetalStorage::new(dst, device.clone(), el, DType::U32);
Ok((dst, layout.shape().clone()))
}
}
impl Tensor {
/// Returns the indices that sort the tensor along the last dimension.
///
/// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order when it
/// comes to ties.
pub fn arg_sort_last_dim(&self, asc: bool) -> Result<Tensor> {
if !self.is_contiguous() {
return Err(crate::Error::RequiresContiguous {
op: "arg_sort_last_dim",
});
}
let last_dim = match self.dims().last() {
None => crate::bail!("empty last-dim in arg-sort"),
Some(last_dim) => *last_dim,
};
// No need for a backward pass for arg sort.
self.apply_op1_no_bwd(&ArgSort { asc, last_dim })
}
/// Sorts the tensor along the last dimension, returns the sorted tensor together with the
/// sorted indexes.
///
/// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order when it
/// comes to ties.
pub fn sort_last_dim(&self, asc: bool) -> Result<(Tensor, Tensor)> {
if !self.is_contiguous() {
return Err(crate::Error::RequiresContiguous {
op: "sort_last_dim",
});
}
let asort = self.arg_sort_last_dim(asc)?;
let sorted = self.gather(&asort, crate::D::Minus1)?;
Ok((sorted, asort))
}
}
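// A minimal usage sketch (not part of the original file): sorting a small CPU tensor along its
// last dimension. The tensor values below are illustrative assumptions.
#[cfg(test)]
mod sort_sketch {
    use crate::{Device, Result, Tensor};

    #[test]
    fn sort_last_dim_ascending() -> Result<()> {
        let t = Tensor::new(&[[3f32, 1., 2.], [0., 5., 4.]], &Device::Cpu)?;
        let (sorted, indexes) = t.sort_last_dim(true)?;
        assert_eq!(sorted.to_vec2::<f32>()?, &[[1., 2., 3.], [0., 4., 5.]]);
        // The indices are u32 positions into the original rows.
        assert_eq!(indexes.to_vec2::<u32>()?, &[[1u32, 2, 0], [0, 2, 1]]);
        Ok(())
    }
}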
| candle/candle-core/src/sort.rs/0 | {
"file_path": "candle/candle-core/src/sort.rs",
"repo_id": "candle",
"token_count": 4992
} | 29 |
use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor};
fn matmul(device: &Device) -> Result<()> {
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let a = Tensor::from_slice(&data, (2, 2), device)?;
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let b = Tensor::from_slice(&data, (2, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
let data = vec![1.0f32, 2.0];
let a = Tensor::from_slice(&data, (2, 1), device)?;
let data = vec![3.0f32, 4.0];
let b = Tensor::from_slice(&data, (1, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]);
let data: Vec<_> = (0..6).map(|i| i as f32).collect();
let a = Tensor::from_slice(&data, (2, 3), device)?;
let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect();
let b = Tensor::from_slice(&data, (3, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]);
let data: Vec<_> = (0..12).map(|i| i as f32).collect();
let a = Tensor::from_slice(&data, (2, 2, 3), device)?;
let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect();
let b = Tensor::from_slice(&data, (2, 3, 2), device)?;
let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]];
let c = a.matmul(&b)?;
assert_eq!(c.to_vec3::<f32>()?, &expected);
// Also perform the matmul on contiguous transposed versions.
let a_tt = a.t()?.contiguous()?.t()?;
assert!(!a_tt.is_contiguous());
assert_eq!(a.dims(), a_tt.dims());
assert_eq!(a_tt.stride(), &[6, 1, 2]);
let b_tt = b.t()?.contiguous()?.t()?;
assert!(!b_tt.is_contiguous());
assert_eq!(b.dims(), b_tt.dims());
assert_eq!(b_tt.stride(), &[6, 1, 3]);
assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected);
assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
Ok(())
}
fn matmul_bf16(device: &Device) -> Result<()> {
if !device.supports_bf16() {
return Ok(());
}
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let a = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?;
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let b = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?;
let c = a.matmul(&b)?.to_dtype(DType::F32)?;
assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
Ok(())
}
fn broadcast_matmul(device: &Device) -> Result<()> {
let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?;
let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?;
let out = lhs.broadcast_matmul(&rhs)?;
assert_eq!(out.dims(), &[3, 6, 4, 2]);
for idx1 in 0..3 {
for idx2 in 0..6 {
let out = out.i((idx1, idx2))?;
let lhs = lhs.i((idx1, 0))?;
let rhs = rhs.i(idx2)?;
let out2 = lhs.matmul(&rhs);
let sum_diff2 = (out - out2)?.sqr()?.sum_all()?;
// With cuda, we see errors of up to ~1e-12.
assert!(sum_diff2.to_vec0::<f32>()? < 1e-6)
}
}
Ok(())
}
#[test]
fn tensor_dot() -> Result<()> {
let lhs = Tensor::new(&[1., 2., 3.], &Device::Cpu)?;
let rhs = Tensor::new(&[4., 5., 6.], &Device::Cpu)?;
let expected = Tensor::new(32., &Device::Cpu)?;
let dot_ret = lhs.dot(&rhs)?;
candle_core::test_utils::assert_tensor_eq(&dot_ret, &expected)?;
Ok(())
}
#[test]
fn tensor_mv() -> Result<()> {
let mat = Tensor::new(&[[1., 2., 3.], [4., 5., 6.]], &Device::Cpu)?;
let vec = Tensor::new(&[1., 1., 1.], &Device::Cpu)?;
let expected = Tensor::new(&[6., 15.], &Device::Cpu)?;
let mv_ret = mat.mv(&vec)?;
candle_core::test_utils::assert_tensor_eq(&mv_ret, &expected)?;
Ok(())
}
// https://github.com/huggingface/candle/issues/1948
fn squeeze_mm(device: &Device) -> Result<()> {
let seq_len = 8_usize;
let a = Tensor::zeros((1, seq_len, 16), DType::F32, device)?;
let x = a.i((.., seq_len - 1, ..))?;
let w = Tensor::zeros((32, 16), DType::F32, device)?.t()?;
let x = x.matmul(&w)?;
assert_eq!(x.dims(), &[1, 32]);
Ok(())
}
// https://github.com/huggingface/candle/issues/1992
fn mm_layout(device: &Device) -> Result<()> {
let a = Tensor::arange(0f32, 16f32, device)?.reshape((1, 1, 4, 4))?;
let b = Tensor::arange(0f32, 8f32, device)?.reshape((1, 1, 4, 2))?;
let mm1 = a.matmul(&b)?;
// Forces the layout to be:
// shape: [1, 1, 4, 2], stride: [8, 2, 2, 1], start_offset: 0
    // This is still a contiguous matrix, but the matmul checks only consider the last two
    // dimensions (the ones with non-1 sizes), so matmul may be reluctant to handle this layout.
let b = b.transpose(1, 2)?.force_contiguous()?.transpose(1, 2)?;
let mm2 = a.matmul(&b)?;
let diff = (mm1 - mm2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
Ok(())
}
test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal);
test_device!(
matmul_bf16,
matmul_bf16_cpu,
matmul_bf16_gpu,
matmul_bf16_metal
);
test_device!(
broadcast_matmul,
broadcast_matmul_cpu,
broadcast_matmul_gpu,
broadcast_matmul_metal
);
test_device!(squeeze_mm, squeeze_mm_cpu, squeeze_mm_gpu, squeeze_mm_metal);
test_device!(mm_layout, mm_layout_cpu, mm_layout_gpu, mm_layout_metal);
| candle/candle-core/tests/matmul_tests.rs/0 | {
"file_path": "candle/candle-core/tests/matmul_tests.rs",
"repo_id": "candle",
"token_count": 2675
} | 30 |
//! Datasets & Dataloaders for Candle
pub mod batcher;
pub mod hub;
pub mod nlp;
pub mod vision;
pub use batcher::Batcher;
| candle/candle-datasets/src/lib.rs/0 | {
"file_path": "candle/candle-datasets/src/lib.rs",
"repo_id": "candle",
"token_count": 45
} | 31 |
# candle-starcoder: code generation model
[StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is an LLM
specialized in code generation. The initial model was trained on 80
programming languages.
## Running an example
```bash
cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64 "
> fn fact(n: u64) -> u64 {
> if n == 0 {
> 1
> } else {
> n * fact(n - 1)
> }
> }
```
| candle/candle-examples/examples/bigcode/README.md/0 | {
"file_path": "candle/candle-examples/examples/bigcode/README.md",
"repo_id": "candle",
"token_count": 180
} | 32 |
# candle-convnext
[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) and
[ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808).
This candle implementation uses a pre-trained ConvNeXt network for inference. The
classification head has been trained on the ImageNet dataset and the example
prints the probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example convnext --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which tiny
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 84.09%
bicycle-built-for-two, tandem bicycle, tandem: 4.15%
maillot : 0.74%
crash helmet : 0.54%
unicycle, monocycle : 0.44%
```
| candle/candle-examples/examples/convnext/README.md/0 | {
"file_path": "candle/candle-examples/examples/convnext/README.md",
"repo_id": "candle",
"token_count": 293
} | 33 |
//! Depth Anything V2
//! https://huggingface.co/spaces/depth-anything/Depth-Anything-V2
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use clap::Parser;
use std::{ffi::OsString, path::PathBuf, sync::Arc};
use candle::DType::{F32, U8};
use candle::{DType, Device, Module, Result, Tensor};
use candle_examples::{load_image, load_image_and_resize, save_image};
use candle_nn::VarBuilder;
use candle_transformers::models::depth_anything_v2::{DepthAnythingV2, DepthAnythingV2Config};
use candle_transformers::models::dinov2;
use crate::color_map::SpectralRColormap;
mod color_map;
// Constants taken from: https://huggingface.co/spaces/depth-anything/Depth-Anything-V2/blob/main/depth_anything_v2/dpt.py#L207
const MAGIC_MEAN: [f32; 3] = [0.485, 0.456, 0.406];
const MAGIC_STD: [f32; 3] = [0.229, 0.224, 0.225];
const DINO_IMG_SIZE: usize = 518;
#[derive(Parser)]
struct Args {
#[arg(long)]
dinov2_model: Option<PathBuf>,
#[arg(long)]
depth_anything_v2_model: Option<PathBuf>,
#[arg(long)]
image: PathBuf,
#[arg(long)]
output_dir: Option<PathBuf>,
#[arg(long)]
cpu: bool,
#[arg(long)]
color_map: bool,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let dinov2_model_file = match args.dinov2_model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("lmz/candle-dino-v2".into());
api.get("dinov2_vits14.safetensors")?
}
Some(dinov2_model) => dinov2_model,
};
println!("Using file {:?}", dinov2_model_file);
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[dinov2_model_file], F32, &device)? };
let dinov2 = dinov2::vit_small(vb)?;
println!("DinoV2 model built");
let depth_anything_model_file = match args.depth_anything_v2_model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("jeroenvlek/depth-anything-v2-safetensors".into());
api.get("depth_anything_v2_vits.safetensors")?
}
Some(depth_anything_model) => depth_anything_model,
};
println!("Using file {:?}", depth_anything_model_file);
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&[depth_anything_model_file], DType::F32, &device)?
};
let config = DepthAnythingV2Config::vit_small();
let depth_anything = DepthAnythingV2::new(Arc::new(dinov2), config, vb)?;
let (original_height, original_width, image) = load_and_prep_image(&args.image, &device)?;
println!("Loaded image {image:?}");
let depth = depth_anything.forward(&image)?;
println!("Got predictions {:?}", depth.shape());
let output_image = post_process_image(&depth, original_height, original_width, args.color_map)?;
let output_path = full_output_path(&args.image, &args.output_dir);
println!("Saving image to {}", output_path.to_string_lossy());
save_image(&output_image, output_path)?;
Ok(())
}
fn full_output_path(image_path: &PathBuf, output_dir: &Option<PathBuf>) -> PathBuf {
let input_file_name = image_path.file_name().unwrap();
let mut output_file_name = OsString::from("depth_");
output_file_name.push(input_file_name);
let mut output_path = match output_dir {
None => image_path.parent().unwrap().to_path_buf(),
Some(output_path) => output_path.clone(),
};
output_path.push(output_file_name);
output_path
}
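/// Loads the image, resizes it to the DINOv2 input size, scales pixel values to [0, 1], and
/// applies ImageNet-style normalization. Returns the original (height, width) alongside the
/// prepared tensor.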
fn load_and_prep_image(
image_path: &PathBuf,
device: &Device,
) -> anyhow::Result<(usize, usize, Tensor)> {
let (_original_image, original_height, original_width) = load_image(&image_path, None)?;
let image = load_image_and_resize(&image_path, DINO_IMG_SIZE, DINO_IMG_SIZE)?
.unsqueeze(0)?
.to_dtype(F32)?
.to_device(&device)?;
let max_pixel_val = Tensor::try_from(255.0f32)?
.to_device(&device)?
.broadcast_as(image.shape())?;
let image = (image / max_pixel_val)?;
let image = normalize_image(&image, &MAGIC_MEAN, &MAGIC_STD)?;
Ok((original_height, original_width, image))
}
fn normalize_image(image: &Tensor, mean: &[f32; 3], std: &[f32; 3]) -> Result<Tensor> {
let mean_tensor =
Tensor::from_vec(mean.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?;
let std_tensor =
Tensor::from_vec(std.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?;
image.sub(&mean_tensor)?.div(&std_tensor)
}
fn post_process_image(
image: &Tensor,
original_height: usize,
original_width: usize,
color_map: bool,
) -> Result<Tensor> {
let out = image.interpolate2d(original_height, original_width)?;
let out = scale_image(&out)?;
let out = if color_map {
let spectral_r = SpectralRColormap::new();
spectral_r.gray2color(&out)?
} else {
let rgb_slice = [&out, &out, &out];
Tensor::cat(&rgb_slice, 0)?.squeeze(1)?
};
let max_pixel_val = Tensor::try_from(255.0f32)?
.to_device(out.device())?
.broadcast_as(out.shape())?;
let out = (out * max_pixel_val)?;
out.to_dtype(U8)
}
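/// Min-max scales the depth map so its values span the [0, 1] range.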
fn scale_image(depth: &Tensor) -> Result<Tensor> {
let flat_values: Vec<f32> = depth.flatten_all()?.to_vec1()?;
let min_val = flat_values.iter().min_by(|a, b| a.total_cmp(b)).unwrap();
let max_val = flat_values.iter().max_by(|a, b| a.total_cmp(b)).unwrap();
let min_val_tensor = Tensor::try_from(*min_val)?
.to_device(depth.device())?
.broadcast_as(depth.shape())?;
let depth = (depth - min_val_tensor)?;
let range = max_val - min_val;
let range_tensor = Tensor::try_from(range)?
.to_device(depth.device())?
.broadcast_as(depth.shape())?;
depth / range_tensor
}
| candle/candle-examples/examples/depth_anything_v2/main.rs/0 | {
"file_path": "candle/candle-examples/examples/depth_anything_v2/main.rs",
"repo_id": "candle",
"token_count": 2544
} | 34 |
//! EVA-02: Explore the limits of Visual representation at scAle
//! https://github.com/baaivision/EVA
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::Parser;
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::eva2;
/// Loads an image from disk using the image crate and returns a tensor with shape
/// (3, 448, 448). OpenAI normalization is applied.
pub fn load_image448_openai_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(448, 448, image::imageops::FilterType::Triangle);
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (448, 448, 3), &Device::Cpu)?.permute((2, 0, 1))?;
let mean =
Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], &Device::Cpu)?.reshape((3, 1, 1))?;
let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], &Device::Cpu)?
.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = load_image448_openai_norm(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("vincent-espitalier/candle-eva2".into());
api.get("eva02_base_patch14_448.mim_in22k_ft_in22k_in1k_adapted.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = eva2::vit_base(vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/eva2/main.rs/0 | {
"file_path": "candle/candle-examples/examples/eva2/main.rs",
"repo_id": "candle",
"token_count": 1221
} | 35 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle_transformers::models::qwen2::{Config, Model};
use candle::{DType, Tensor};
use candle_nn::VarBuilder;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::{
utils::padding::{PaddingDirection, PaddingParams, PaddingStrategy},
Tokenizer,
};
// gte-Qwen1.5-7B-instruct uses the EOS token as the padding token
const EOS_TOKEN: &str = "<|endoftext|>";
const EOS_TOKEN_ID: u32 = 151643;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long, default_value = "Alibaba-NLP/gte-Qwen1.5-7B-instruct")]
model_id: String,
#[arg(long, default_value = "main")]
revision: String,
#[arg(long)]
local_repo: Option<String>,
}
#[derive(Debug)]
struct ConfigFiles {
pub config: std::path::PathBuf,
pub tokenizer: std::path::PathBuf,
pub weights: Vec<std::path::PathBuf>,
}
// Loading the model from the HuggingFace Hub. Network access is required.
fn load_from_hub(model_id: &str, revision: &str) -> Result<ConfigFiles> {
let api = Api::new()?;
let repo = api.repo(Repo::with_revision(
model_id.to_string(),
RepoType::Model,
revision.to_string(),
));
Ok(ConfigFiles {
config: repo.get("config.json")?,
tokenizer: repo.get("tokenizer.json")?,
weights: candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?,
})
}
// Loading the model from a local directory.
fn load_from_local(local_path: &str) -> Result<ConfigFiles> {
let local_path = std::path::PathBuf::from(local_path);
let weight_path = local_path.join("model.safetensors.index.json");
let json: serde_json::Value = serde_json::from_str(&std::fs::read_to_string(weight_path)?)?;
let weight_map = match json.get("weight_map") {
Some(serde_json::Value::Object(map)) => map,
Some(_) => panic!("`weight map` is not a map"),
None => panic!("`weight map` not found"),
};
let mut safetensors_files = std::collections::HashSet::new();
for value in weight_map.values() {
safetensors_files.insert(
value
.as_str()
.expect("Weight files should be parsed as strings"),
);
}
let safetensors_paths = safetensors_files
.iter()
.map(|v| local_path.join(v))
.collect::<Vec<_>>();
Ok(ConfigFiles {
config: local_path.join("config.json"),
tokenizer: local_path.join("tokenizer.json"),
weights: safetensors_paths,
})
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
// Fetch the model. Do this offline if local path provided.
println!("Fetching model files...");
let start = std::time::Instant::now();
let config_files = match args.local_repo {
Some(local_path) => load_from_local(&local_path)?,
None => load_from_hub(&args.model_id, &args.revision)?,
};
println!("Model file retrieved in {:?}", start.elapsed());
// Inputs will be padded to the longest sequence in the batch.
let padding = PaddingParams {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Left,
pad_to_multiple_of: None,
pad_id: EOS_TOKEN_ID,
pad_type_id: 0,
pad_token: String::from(EOS_TOKEN),
};
// Tokenizer setup
let mut tokenizer = Tokenizer::from_file(config_files.tokenizer).map_err(E::msg)?;
tokenizer.with_padding(Some(padding));
// Model initialization
let device = candle_examples::device(args.cpu)?;
let dtype = if device.is_cuda() {
DType::BF16
} else {
DType::F32
};
let config: Config = serde_json::from_slice(&std::fs::read(config_files.config)?)?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&config_files.weights, dtype, &device)? };
let mut model = Model::new(&config, vb)?;
println!("Model loaded in {:?}", start.elapsed());
// Encode the queries and the targets
let instruct = "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery: ";
let documents = vec![
format!("{instruct}how much protein should a female eat{EOS_TOKEN}"),
format!("{instruct}summit define{EOS_TOKEN}"),
format!("As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.{EOS_TOKEN}"),
format!("Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.{EOS_TOKEN}"),
];
let encoded = tokenizer.encode_batch(documents, true).map_err(E::msg)?;
let tokens: Vec<&[u32]> = encoded.iter().map(|x| x.get_ids()).collect();
let tokens = Tensor::new(tokens, &device)?;
let mask: Vec<&[u32]> = encoded.iter().map(|x| x.get_attention_mask()).collect();
let mask = Tensor::new(mask, &device)?;
// Inference
let start_gen = std::time::Instant::now();
let logits = model.forward(&tokens, 0, Some(&mask))?;
// Extract the last hidden states as embeddings since inputs are padded left.
let (_, seq_len, _) = logits.dims3()?;
let embd = logits
.narrow(1, seq_len - 1, 1)?
.squeeze(1)?
.to_dtype(DType::F32)?;
    // Calculate the similarity scores. Note that the embeddings must be normalized first.
let norm = embd.broadcast_div(&embd.sqr()?.sum_keepdim(1)?.sqrt()?)?;
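    // Rows 0..2 hold the query embeddings and rows 2..4 the documents, so the result is a
    // 2x2 query-to-document similarity matrix.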
let scores = norm.narrow(0, 0, 2)?.matmul(&norm.narrow(0, 2, 2)?.t()?)?;
// Print the results
println!("Embedding done in {:?}", start_gen.elapsed());
println!("Scores: {:?}", scores.to_vec2::<f32>()?);
Ok(())
}
| candle/candle-examples/examples/gte-qwen/main.rs/0 | {
"file_path": "candle/candle-examples/examples/gte-qwen/main.rs",
"repo_id": "candle",
"token_count": 2613
} | 36 |
pub mod constants;
pub mod conversation;
pub mod image_processor;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use candle_transformers::models::llama::Cache;
use anyhow::{bail, Error as E, Result};
use candle::{DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::llava::config::{
HFGenerationConfig, HFLLaVAConfig, HFPreProcessorConfig,
};
use candle_transformers::models::llava::{config::LLaVAConfig, LLaVA};
use clap::Parser;
use constants::*;
use conversation::Conversation;
use hf_hub::api::sync::Api;
use image_processor::{process_image, ImageProcessor};
use std::io::Write;
use tokenizers::Tokenizer;
#[derive(Parser, Debug)]
#[command(author, version, about,long_about=None)]
struct Args {
#[arg(long, default_value = "llava-hf/llava-v1.6-vicuna-7b-hf")]
model_path: String,
#[arg(long, default_value = "tokenizer/tokenizer.json")]
tokenizer_path: String,
#[arg(long)]
model_base: Option<String>,
#[arg(long)]
image_file: String, // Required
#[arg(long)]
conv_mode: Option<String>,
#[arg(long, default_value_t = 0.2)]
temperature: f32,
#[arg(long, default_value_t = 512)]
max_new_tokens: usize,
#[arg(long, action)]
hf: bool,
#[arg(long, action)]
cpu: bool,
#[arg(long, action)]
no_kv_cache: bool,
#[arg(long)]
prompt: String,
    /// The seed to use when generating random samples. Copied from the candle llama example;
    /// this argument does not exist in the Python llava implementation.
#[arg(long, default_value_t = 299792458)]
seed: u64,
}
//from https://github.com/huggingface/candle/blob/main/candle-examples/examples/clip/main.rs
fn load_image<T: AsRef<std::path::Path>>(
path: T,
processor: &ImageProcessor,
llava_config: &LLaVAConfig,
dtype: DType,
) -> Result<((u32, u32), Tensor)> {
let img = image::ImageReader::open(path)?.decode()?;
let img_tensor = process_image(&img, processor, llava_config)?;
Ok(((img.width(), img.height()), img_tensor.to_dtype(dtype)?))
}
fn get_model_name_from_path(model_path: &str) -> String {
let model_paths: Vec<String> = model_path
.trim_matches('/')
.split('/')
.map(|s| s.to_string())
.collect();
if model_paths.last().unwrap().starts_with("checkpoint-") {
format!(
"{}_{}",
model_paths[model_paths.len() - 2],
model_paths.last().unwrap()
)
} else {
model_paths.last().unwrap().to_string()
}
}
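/// Concatenates `n` copies of `vec` into a single vector.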
fn duplicate_vec<T>(vec: &[T], n: usize) -> Vec<T>
where
T: Clone,
{
let mut res = Vec::new();
for _ in 0..n {
res.extend(vec.to_owned());
}
res
}
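/// Interleaves `sep` between consecutive elements of `x`, e.g. [a, b] -> [a, sep, b].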
fn insert_separator<T>(x: Vec<Vec<T>>, sep: Vec<T>) -> Vec<Vec<T>>
where
T: Clone,
{
let sep = vec![sep];
let sep = duplicate_vec(&sep, x.len());
let mut res = x
.iter()
.zip(sep.iter())
.flat_map(|(x, y)| vec![x.clone(), y.clone()])
.collect::<Vec<Vec<T>>>();
res.pop();
res
}
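/// Tokenizes a prompt containing `<image>` placeholders: the text chunks are encoded
/// separately and the image token id is spliced in between them, mirroring the Python
/// LLaVA implementation.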
fn tokenizer_image_token(
prompt: &str,
tokenizer: &Tokenizer,
image_token_index: i64,
llava_config: &LLaVAConfig,
) -> Result<Tensor> {
let prompt_chunks = prompt
.split("<image>")
.map(|s| {
tokenizer
.encode(s, true)
.unwrap()
.get_ids()
.to_vec()
.iter()
.map(|x| *x as i64)
.collect()
})
.collect::<Vec<Vec<i64>>>();
let mut input_ids = Vec::new();
let mut offset = 0;
if !prompt_chunks.is_empty()
&& !prompt_chunks[0].is_empty()
&& prompt_chunks[0][0] == llava_config.bos_token_id as i64
{
offset = 1;
input_ids.push(prompt_chunks[0][0]);
}
for x in insert_separator(
prompt_chunks,
duplicate_vec(&[image_token_index], offset + 1),
)
.iter()
{
input_ids.extend(x[1..].to_vec())
}
let input_len = input_ids.len();
Tensor::from_vec(input_ids, (1, input_len), &Device::Cpu).map_err(E::msg)
}
fn main() -> Result<()> {
let mut args = Args::parse();
let device = candle_examples::device(args.cpu)?;
println!("Start loading model");
let api = Api::new()?;
let api = api.model(args.model_path.clone());
let (llava_config, tokenizer, clip_vision_config, image_processor) = if args.hf {
let config_filename = api.get("config.json")?;
let hf_llava_config: HFLLaVAConfig =
serde_json::from_slice(&std::fs::read(config_filename)?)?;
let generation_config_filename = api.get("generation_config.json")?;
let generation_config: HFGenerationConfig =
serde_json::from_slice(&std::fs::read(generation_config_filename)?)?;
let preprocessor_config_filename = api.get("preprocessor_config.json")?;
let preprocessor_config: HFPreProcessorConfig =
serde_json::from_slice(&std::fs::read(preprocessor_config_filename)?)?;
let llava_config =
hf_llava_config.to_llava_config(&generation_config, &preprocessor_config);
let tokenizer_filename = api.get("tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let clip_vision_config = hf_llava_config.to_clip_vision_config();
(
llava_config,
tokenizer,
Some(clip_vision_config),
ImageProcessor::from_hf_preprocessor_config(&preprocessor_config),
)
} else {
let config_filename = api.get("config.json")?;
let llava_config: LLaVAConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let tokenizer = Tokenizer::from_file(&args.tokenizer_path)
.map_err(|e| E::msg(format!("Error loading {}: {}", &args.tokenizer_path, e)))?;
(
llava_config.clone(),
tokenizer,
None,
ImageProcessor::from_pretrained(&llava_config.mm_vision_tower.unwrap())?,
)
};
let llama_config = llava_config.to_llama_config();
let dtype: DType = match llava_config.torch_dtype.as_str() {
"float16" => DType::F16,
"bfloat16" => DType::BF16,
_ => bail!("unsupported dtype"),
};
let eos_token_id = llava_config.eos_token_id;
println!("setting kv cache");
let mut cache = Cache::new(!args.no_kv_cache, dtype, &llama_config, &device)?;
println!("loading model weights");
let weight_filenames =
candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&weight_filenames, dtype, &device)? };
let llava: LLaVA = LLaVA::load(vb, &llava_config, clip_vision_config)?;
println!("generating conv template");
let image_token_se =
format!("{DEFAULT_IM_START_TOKEN}{DEFAULT_IMAGE_TOKEN}{DEFAULT_IM_END_TOKEN}");
let qs = if args.prompt.contains(IMAGE_PLACEHOLDER) {
if llava_config.mm_use_im_start_end {
args.prompt.replace(IMAGE_PLACEHOLDER, &image_token_se)
} else {
args.prompt.replace(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN)
}
} else if llava_config.mm_use_im_start_end {
format!("{}\n{}", image_token_se, args.prompt)
} else {
format!("{}\n{}", DEFAULT_IMAGE_TOKEN, args.prompt)
};
let model_name = get_model_name_from_path(&args.model_path).to_lowercase();
let conv_mode = if model_name.contains("llama-2") {
"llava_llama_2"
} else if model_name.contains("mistral") {
"mistral_instruct"
} else if model_name.contains("v1.6-34b") {
"chatml_direct"
} else if model_name.contains("v1") {
"llava_v1"
} else if model_name.contains("mpt") {
"mpt"
} else {
"llava_v0"
};
if args.conv_mode.is_some() && args.conv_mode.as_deref() != Some(conv_mode) {
println!(
"Warning: the model is trained with {}, but you are using {}",
conv_mode,
args.conv_mode.as_deref().unwrap()
);
} else {
args.conv_mode = Some(conv_mode.to_string());
}
let mut conv = match args.conv_mode {
Some(conv_mode) => match conv_mode.as_str() {
"chatml_direct" => Conversation::conv_chatml_direct(),
"llava_v1" => Conversation::conv_llava_v1(),
            _ => todo!("not implemented yet"),
},
None => bail!("conv_mode is required"),
};
conv.append_user_message(Some(&qs));
conv.append_assistant_message(None);
let prompt = conv.get_prompt();
println!("loading image");
let (image_size, image_tensor) =
load_image(&args.image_file, &image_processor, &llava_config, dtype)
.map_err(|e| E::msg(format!("Error loading {}: {}", &args.image_file, e)))?;
let image_tensor = image_tensor.to_device(&device)?;
let mut logits_processor = {
let temperature = f64::from(args.temperature);
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
Sampling::All { temperature }
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
// get input tokens
let tokens = tokenizer_image_token(
&prompt,
&tokenizer,
llava_config.image_token_index as i64,
&llava_config,
)?;
let mut input_embeds =
llava.prepare_inputs_labels_for_multimodal(&tokens, &[image_tensor], &[image_size])?;
//inference loop, based on https://github.com/huggingface/candle/blob/main/candle-examples/examples/llama/main.rs
let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer);
let mut index_pos = 0;
for index in 0..args.max_new_tokens {
let (_, input_embeds_len, _) = input_embeds.dims3()?;
let (context_size, context_index) = if cache.use_kv_cache && index > 0 {
(1, index_pos)
} else {
(input_embeds_len, 0)
};
let input = input_embeds.i((.., input_embeds_len.saturating_sub(context_size).., ..))?;
let logits = llava.forward(&input, context_index, &mut cache)?; //[1,32000]
let logits = logits.squeeze(0)?;
let (_, input_len, _) = input.dims3()?;
index_pos += input_len;
let next_token = logits_processor.sample(&logits)?;
let next_token_tensor = Tensor::from_vec(vec![next_token], 1, &device)?;
let next_embeds = llava.llama.embed(&next_token_tensor)?.unsqueeze(0)?;
input_embeds = Tensor::cat(&[input_embeds, next_embeds], 1)?;
if next_token == eos_token_id as u32 {
break;
}
if let Some(t) = tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
Ok(())
}
| candle/candle-examples/examples/llava/main.rs/0 | {
"file_path": "candle/candle-examples/examples/llava/main.rs",
"repo_id": "candle",
"token_count": 5082
} | 37 |
# candle-mistral: 7b LLM with Apache 2.0 licensed weights
Mistral-7B-v0.1 is a pretrained generative LLM with 7 billion parameters. It outperforms all the publicly available 13b models
as of 2023-09-28. Weights (and the original Python model code) are released under the permissive Apache 2.0 license.
- [Blog post](https://mistral.ai/news/announcing-mistral-7b/) from Mistral announcing the model release.
- [Model card](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the
HuggingFace Hub.
This example supports the initial model as well as a quantized variant.
## Running the example
```bash
$ cargo run --example mistral --release --features cuda -- --prompt 'Write helloworld code in Rust' --sample-len 150
Generated text:
Write helloworld code in Rust
=============================
This is a simple example of how to write "Hello, world!" program in Rust.
## Compile and run
``bash
$ cargo build --release
Compiling hello-world v0.1.0 (/home/user/rust/hello-world)
Finished release [optimized] target(s) in 0.26s
$ ./target/release/hello-world
Hello, world!
``
## Source code
``rust
fn main() {
println!("Hello, world!");
}
``
## License
This example is released under the terms
```
## Running the quantized version of the model
```bash
$ cargo run --example mistral --features accelerate --release -- \
$ --prompt "Here is a sample quick sort implementation in rust " --quantized -n 400
avx: false, neon: true, simd128: false, f16c: false
temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
retrieved the files in 562.292µs
loaded the model in 1.100323667s
Here is a sample quick sort implementation in rust
``rust
fn quick_sort(arr: &mut [i32]) {
if arr.len() <= 1 {
return;
}
let pivot = arr[0];
let mut left = vec![];
let mut right = vec![];
for i in 1..arr.len() {
if arr[i] < pivot {
left.push(arr[i]);
} else {
right.push(arr[i]);
}
}
quick_sort(&mut left);
quick_sort(&mut right);
let mut i = 0;
for _ in &left {
arr[i] = left.pop().unwrap();
i += 1;
}
for _ in &right {
arr[i] = right.pop().unwrap();
i += 1;
}
}
``
226 tokens generated (10.91 token/s)
```
| candle/candle-examples/examples/mistral/README.md/0 | {
"file_path": "candle/candle-examples/examples/mistral/README.md",
"repo_id": "candle",
"token_count": 829
} | 38 |
# candle-musicgen
Candle implementation of MusicGen from [Simple and Controllable Music Generation](https://arxiv.org/pdf/2306.05284).
## Running an example
```bash
$ cargo run --example musicgen -- --prompt "90s rock song with loud guitars and heavy drums"
> tokens: [2777, 7, 2480, 2324, 28, 8002, 5507, 7, 11, 2437, 5253, 7, 1]
> Tensor[dims 1, 13; u32]
> [[[ 0.0902, 0.1256, -0.0585, ..., 0.1057, -0.5141, -0.4675],
> [ 0.1972, -0.0268, -0.3368, ..., -0.0495, -0.3597, -0.3940],
> [-0.0855, -0.0007, 0.2225, ..., -0.2804, -0.5360, -0.2436],
> ...
> [ 0.0515, 0.0235, -0.3855, ..., -0.4728, -0.6858, -0.2923],
> [-0.3728, -0.1442, -0.1179, ..., -0.4388, -0.0287, -0.3242],
> [ 0.0163, 0.0012, -0.0020, ..., 0.0142, 0.0173, -0.0103]]]
> Tensor[[1, 13, 768], f32]
```
| candle/candle-examples/examples/musicgen/README.md/0 | {
"file_path": "candle/candle-examples/examples/musicgen/README.md",
"repo_id": "candle",
"token_count": 400
} | 39 |
# candle-parler-tts
[Parler-TTS](https://huggingface.co/parler-tts/parler-tts-large-v1) is a large
text-to-speech model with 2.2B parameters trained on ~45K hours of audio data.
The voice can be controlled by a text prompt.
## Run an example
```bash
cargo run --example parler-tts -r -- \
--prompt "Hey, how are you doing today?"
```
In order to specify some prompt for the voice, use the `--description` argument.
```bash
cargo run --example parler-tts -r -- \
--prompt "Hey, how are you doing today?" \
--description "A female speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. The recording is of very high quality, with the speaker's voice sounding clear and very close up."
```
https://github.com/user-attachments/assets/1b16aeac-70a3-4803-8589-4563279bba33
| candle/candle-examples/examples/parler-tts/README.md/0 | {
"file_path": "candle/candle-examples/examples/parler-tts/README.md",
"repo_id": "candle",
"token_count": 260
} | 40 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::io::Write;
use std::path::PathBuf;
use candle_transformers::models::quantized_t5 as t5;
use anyhow::{Error as E, Result};
use candle::{Device, Tensor};
use candle_transformers::generation::LogitsProcessor;
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, api::sync::ApiRepo, Repo, RepoType};
use tokenizers::Tokenizer;
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
T5Small,
FlanT5Small,
FlanT5Base,
FlanT5Large,
FlanT5Xl,
FlanT5Xxl,
}
#[derive(Parser, Debug, Clone)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model repository to use on the HuggingFace hub.
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
weight_file: Option<String>,
#[arg(long)]
config_file: Option<String>,
    /// Disable the key-value cache during decoding.
#[arg(long, default_value = "false")]
disable_cache: bool,
/// Use this prompt, otherwise compute sentence similarities.
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model size to use.
#[arg(long, default_value = "t5-small")]
which: Which,
}
struct T5ModelBuilder {
device: Device,
config: t5::Config,
weights_filename: PathBuf,
}
impl T5ModelBuilder {
pub fn load(args: &Args) -> Result<(Self, Tokenizer)> {
let device = Device::Cpu;
let default_model = "lmz/candle-quantized-t5".to_string();
let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, "main".to_string()),
};
let repo = Repo::with_revision(model_id, RepoType::Model, revision);
let api = Api::new()?;
let api = api.repo(repo);
let config_filename = match &args.config_file {
Some(filename) => Self::get_local_or_remote_file(filename, &api)?,
None => match args.which {
Which::T5Small => api.get("config.json")?,
Which::FlanT5Small => api.get("config-flan-t5-small.json")?,
Which::FlanT5Base => api.get("config-flan-t5-base.json")?,
Which::FlanT5Large => api.get("config-flan-t5-large.json")?,
Which::FlanT5Xl => api.get("config-flan-t5-xl.json")?,
Which::FlanT5Xxl => api.get("config-flan-t5-xxl.json")?,
},
};
let tokenizer_filename = api.get("tokenizer.json")?;
let weights_filename = match &args.weight_file {
Some(filename) => Self::get_local_or_remote_file(filename, &api)?,
None => match args.which {
Which::T5Small => api.get("model.gguf")?,
Which::FlanT5Small => api.get("model-flan-t5-small.gguf")?,
Which::FlanT5Base => api.get("model-flan-t5-base.gguf")?,
Which::FlanT5Large => api.get("model-flan-t5-large.gguf")?,
Which::FlanT5Xl => api.get("model-flan-t5-xl.gguf")?,
Which::FlanT5Xxl => api.get("model-flan-t5-xxl.gguf")?,
},
};
let config = std::fs::read_to_string(config_filename)?;
let mut config: t5::Config = serde_json::from_str(&config)?;
config.use_cache = !args.disable_cache;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
Ok((
Self {
device,
config,
weights_filename,
},
tokenizer,
))
}
pub fn build_model(&self) -> Result<t5::T5ForConditionalGeneration> {
let device = Device::Cpu;
let vb = t5::VarBuilder::from_gguf(&self.weights_filename, &device)?;
Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?)
}
fn get_local_or_remote_file(filename: &str, api: &ApiRepo) -> Result<PathBuf> {
let local_filename = std::path::PathBuf::from(filename);
if local_filename.exists() {
Ok(local_filename)
} else {
Ok(api.get(filename)?)
}
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?;
let device = &builder.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(args.prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
let mut model = builder.build_model()?;
let mut output_token_ids = [builder
.config
.decoder_start_token_id
.unwrap_or(builder.config.pad_token_id) as u32]
.to_vec();
let temperature = if args.temperature <= 0. {
None
} else {
Some(args.temperature)
};
let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p);
let encoder_output = model.encode(&input_token_ids)?;
let start = std::time::Instant::now();
for index in 0.. {
if output_token_ids.len() > 512 {
break;
}
let decoder_token_ids = if index == 0 || !builder.config.use_cache {
Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)?
} else {
let last_token = *output_token_ids.last().unwrap();
Tensor::new(&[last_token], device)?.unsqueeze(0)?
};
let logits = model
.decode(&decoder_token_ids, &encoder_output)?
.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&output_token_ids[start_at..],
)?
};
let next_token_id = logits_processor.sample(&logits)?;
if next_token_id as usize == builder.config.eos_token_id {
break;
}
output_token_ids.push(next_token_id);
if let Some(text) = tokenizer.id_to_token(next_token_id) {
let text = text.replace('▁', " ").replace("<0x0A>", "\n");
print!("{text}");
std::io::stdout().flush()?;
}
}
let dt = start.elapsed();
println!(
"\n{} tokens generated ({:.2} token/s)\n",
output_token_ids.len(),
output_token_ids.len() as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/quantized-t5/main.rs/0 | {
"file_path": "candle/candle-examples/examples/quantized-t5/main.rs",
"repo_id": "candle",
"token_count": 3631
} | 41 |
# candle-replit-code: code completion model
[replit-code-v1_5-3b](https://huggingface.co/replit/replit-code-v1_5-3b) is a
language model specialized for code completion. This model uses 3.3B parameters
in `bfloat16` (so the GPU version will only work on recent NVIDIA cards).
## Running an example
```bash
cargo run --example replit-code --release -- --prompt 'def fibonacci(n): '
```
This produces the following output.
```
def fibonacci(n): # write Fibonacci series up to n
"""Print a Fibonacci series up to n."""
a, b = 0, 1
while a < n:
print(a, end=' ')
a, b = b, a+b
print()
def fibonacci_loop(n): # write Fibonacci series up to n
"""Print a Fibonacci series up to n."""
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a+b
return result
def fibonacci_generator(n): # write Fibonacci series up to n
"""Print a Fibonacci series up to n."""
a, b = 0, 1
while a < n:
yield a
a, b = b, a+b
```
| candle/candle-examples/examples/replit-code/README.md/0 | {
"file_path": "candle/candle-examples/examples/replit-code/README.md",
"repo_id": "candle",
"token_count": 426
} | 42 |
//! SAM: Segment Anything Model
//! https://github.com/facebookresearch/segment-anything
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::DType;
use candle_nn::VarBuilder;
use candle_transformers::models::segment_anything::sam;
use clap::Parser;
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(long)]
generate_masks: bool,
/// List of x,y coordinates, between 0 and 1 (0.5 is at the middle of the image). These points
/// should be part of the generated mask.
#[arg(long)]
point: Vec<String>,
/// List of x,y coordinates, between 0 and 1 (0.5 is at the middle of the image). These points
/// should not be part of the generated mask and should be part of the background instead.
#[arg(long)]
neg_point: Vec<String>,
    /// The detection threshold for the mask: 0 is the default value, negative values produce a
    /// larger mask, and positive values make the mask more selective.
#[arg(long, allow_hyphen_values = true, default_value_t = 0.)]
threshold: f32,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Use the TinyViT based models from MobileSAM
#[arg(long)]
use_tiny: bool,
}
pub fn main() -> anyhow::Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(args.cpu)?;
let (image, initial_h, initial_w) =
candle_examples::load_image(&args.image, Some(sam::IMAGE_SIZE))?;
let image = image.to_device(&device)?;
println!("loaded image {image:?}");
let model = match args.model {
Some(model) => std::path::PathBuf::from(model),
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("lmz/candle-sam".to_string());
let filename = if args.use_tiny {
"mobile_sam-tiny-vitt.safetensors"
} else {
"sam_vit_b_01ec64.safetensors"
};
api.get(filename)?
}
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? };
let sam = if args.use_tiny {
sam::Sam::new_tiny(vb)? // tiny vit_t
} else {
sam::Sam::new(768, 12, 12, &[2, 5, 8, 11], vb)? // sam_vit_b
};
if args.generate_masks {
// Default options similar to the Python version.
let bboxes = sam.generate_masks(
&image,
/* points_per_side */ 32,
/* crop_n_layer */ 0,
/* crop_overlap_ratio */ 512. / 1500.,
/* crop_n_points_downscale_factor */ 1,
)?;
for (idx, bbox) in bboxes.iter().enumerate() {
println!("{idx} {bbox:?}");
let mask = (&bbox.data.to_dtype(DType::U8)? * 255.)?;
let (h, w) = mask.dims2()?;
let mask = mask.broadcast_as((3, h, w))?;
candle_examples::save_image_resize(
&mask,
format!("sam_mask{idx}.png"),
initial_h,
initial_w,
)?;
}
} else {
let iter_points = args.point.iter().map(|p| (p, true));
let iter_neg_points = args.neg_point.iter().map(|p| (p, false));
let points = iter_points
.chain(iter_neg_points)
.map(|(point, b)| {
use std::str::FromStr;
let xy = point.split(',').collect::<Vec<_>>();
if xy.len() != 2 {
anyhow::bail!("expected format for points is 0.4,0.2")
}
Ok((f64::from_str(xy[0])?, f64::from_str(xy[1])?, b))
})
.collect::<anyhow::Result<Vec<_>>>()?;
let start_time = std::time::Instant::now();
let (mask, iou_predictions) = sam.forward(&image, &points, false)?;
println!(
"mask generated in {:.2}s",
start_time.elapsed().as_secs_f32()
);
println!("mask:\n{mask}");
println!("iou_predictions: {iou_predictions}");
let mask = (mask.ge(args.threshold)? * 255.)?;
let (_one, h, w) = mask.dims3()?;
let mask = mask.expand((3, h, w))?;
let mut img = image::ImageReader::open(&args.image)?
.decode()
.map_err(candle::Error::wrap)?;
let mask_pixels = mask.permute((1, 2, 0))?.flatten_all()?.to_vec1::<u8>()?;
let mask_img: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> =
match image::ImageBuffer::from_raw(w as u32, h as u32, mask_pixels) {
Some(image) => image,
None => anyhow::bail!("error saving merged image"),
};
let mask_img = image::DynamicImage::from(mask_img).resize_to_fill(
img.width(),
img.height(),
image::imageops::FilterType::CatmullRom,
);
for x in 0..img.width() {
for y in 0..img.height() {
let mask_p = imageproc::drawing::Canvas::get_pixel(&mask_img, x, y);
if mask_p.0[0] > 100 {
let mut img_p = imageproc::drawing::Canvas::get_pixel(&img, x, y);
img_p.0[2] = 255 - (255 - img_p.0[2]) / 2;
img_p.0[1] /= 2;
img_p.0[0] /= 2;
imageproc::drawing::Canvas::draw_pixel(&mut img, x, y, img_p)
}
}
}
for (x, y, b) in points {
let x = (x * img.width() as f64) as i32;
let y = (y * img.height() as f64) as i32;
let color = if b {
image::Rgba([255, 0, 0, 200])
} else {
image::Rgba([0, 255, 0, 200])
};
imageproc::drawing::draw_filled_circle_mut(&mut img, (x, y), 3, color);
}
img.save("sam_merged.jpg")?
}
Ok(())
}
| candle/candle-examples/examples/segment-anything/main.rs/0 | {
"file_path": "candle/candle-examples/examples/segment-anything/main.rs",
"repo_id": "candle",
"token_count": 3137
} | 43 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{DType, IndexOp, D};
use candle_nn::{ModuleT, VarBuilder};
use candle_transformers::models::vgg::{Models, Vgg};
use clap::{Parser, ValueEnum};
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
Vgg13,
Vgg16,
Vgg19,
}
#[derive(Parser)]
struct Args {
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Variant of the model to use.
#[arg(value_enum, long, default_value_t = Which::Vgg13)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let api = hf_hub::api::sync::Api::new()?;
let repo = match args.which {
Which::Vgg13 => "timm/vgg13.tv_in1k",
Which::Vgg16 => "timm/vgg16.tv_in1k",
Which::Vgg19 => "timm/vgg19.tv_in1k",
};
let api = api.model(repo.into());
let filename = "model.safetensors";
let model_file = api.get(filename)?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = match args.which {
Which::Vgg13 => Vgg::new(vb, Models::Vgg13)?,
Which::Vgg16 => Vgg::new(vb, Models::Vgg16)?,
Which::Vgg19 => Vgg::new(vb, Models::Vgg19)?,
};
let logits = model.forward_t(&image, /*train=*/ false)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
// Sort the predictions and take the top 5
let mut top: Vec<_> = prs.iter().enumerate().collect();
top.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap());
let top = top.into_iter().take(5).collect::<Vec<_>>();
// Print the top predictions
for &(i, p) in &top {
println!(
"{:50}: {:.2}%",
candle_examples::imagenet::CLASSES[i],
p * 100.0
);
}
Ok(())
}
| candle/candle-examples/examples/vgg/main.rs/0 | {
"file_path": "candle/candle-examples/examples/vgg/main.rs",
"repo_id": "candle",
"token_count": 967
} | 44 |
use candle::{IndexOp, Result, Tensor, D};
use tokenizers::Tokenizer;
const LANGUAGES: [(&str, &str); 99] = [
("en", "english"),
("zh", "chinese"),
("de", "german"),
("es", "spanish"),
("ru", "russian"),
("ko", "korean"),
("fr", "french"),
("ja", "japanese"),
("pt", "portuguese"),
("tr", "turkish"),
("pl", "polish"),
("ca", "catalan"),
("nl", "dutch"),
("ar", "arabic"),
("sv", "swedish"),
("it", "italian"),
("id", "indonesian"),
("hi", "hindi"),
("fi", "finnish"),
("vi", "vietnamese"),
("he", "hebrew"),
("uk", "ukrainian"),
("el", "greek"),
("ms", "malay"),
("cs", "czech"),
("ro", "romanian"),
("da", "danish"),
("hu", "hungarian"),
("ta", "tamil"),
("no", "norwegian"),
("th", "thai"),
("ur", "urdu"),
("hr", "croatian"),
("bg", "bulgarian"),
("lt", "lithuanian"),
("la", "latin"),
("mi", "maori"),
("ml", "malayalam"),
("cy", "welsh"),
("sk", "slovak"),
("te", "telugu"),
("fa", "persian"),
("lv", "latvian"),
("bn", "bengali"),
("sr", "serbian"),
("az", "azerbaijani"),
("sl", "slovenian"),
("kn", "kannada"),
("et", "estonian"),
("mk", "macedonian"),
("br", "breton"),
("eu", "basque"),
("is", "icelandic"),
("hy", "armenian"),
("ne", "nepali"),
("mn", "mongolian"),
("bs", "bosnian"),
("kk", "kazakh"),
("sq", "albanian"),
("sw", "swahili"),
("gl", "galician"),
("mr", "marathi"),
("pa", "punjabi"),
("si", "sinhala"),
("km", "khmer"),
("sn", "shona"),
("yo", "yoruba"),
("so", "somali"),
("af", "afrikaans"),
("oc", "occitan"),
("ka", "georgian"),
("be", "belarusian"),
("tg", "tajik"),
("sd", "sindhi"),
("gu", "gujarati"),
("am", "amharic"),
("yi", "yiddish"),
("lo", "lao"),
("uz", "uzbek"),
("fo", "faroese"),
("ht", "haitian creole"),
("ps", "pashto"),
("tk", "turkmen"),
("nn", "nynorsk"),
("mt", "maltese"),
("sa", "sanskrit"),
("lb", "luxembourgish"),
("my", "myanmar"),
("bo", "tibetan"),
("tl", "tagalog"),
("mg", "malagasy"),
("as", "assamese"),
("tt", "tatar"),
("haw", "hawaiian"),
("ln", "lingala"),
("ha", "hausa"),
("ba", "bashkir"),
("jw", "javanese"),
("su", "sundanese"),
];
/// Returns the token id for the detected language.
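/// The mel spectrogram is encoded once, a single start-of-transcript token is decoded, and the
/// language token with the highest softmax probability is selected.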
pub fn detect_language(
model: &mut super::Model,
tokenizer: &Tokenizer,
mel: &Tensor,
) -> Result<u32> {
let (_bsize, _, seq_len) = mel.dims3()?;
let mel = mel.narrow(
2,
0,
usize::min(seq_len, model.config().max_source_positions),
)?;
let device = mel.device();
let language_token_ids = LANGUAGES
.iter()
.map(|(t, _)| crate::token_id(tokenizer, &format!("<|{t}|>")))
.collect::<Result<Vec<_>>>()?;
let sot_token = crate::token_id(tokenizer, crate::m::SOT_TOKEN)?;
let audio_features = model.encoder_forward(&mel, true)?;
let tokens = Tensor::new(&[[sot_token]], device)?;
let language_token_ids = Tensor::new(language_token_ids.as_slice(), device)?;
let ys = model.decoder_forward(&tokens, &audio_features, true)?;
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
let logits = logits.index_select(&language_token_ids, 0)?;
let probs = candle_nn::ops::softmax(&logits, D::Minus1)?;
let probs = probs.to_vec1::<f32>()?;
let mut probs = LANGUAGES.iter().zip(probs.iter()).collect::<Vec<_>>();
probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for ((_, language), p) in probs.iter().take(5) {
println!("{language}: {p}")
}
let language = crate::token_id(tokenizer, &format!("<|{}|>", probs[0].0 .0))?;
Ok(language)
}
| candle/candle-examples/examples/whisper/multilingual.rs/0 | {
"file_path": "candle/candle-examples/examples/whisper/multilingual.rs",
"repo_id": "candle",
"token_count": 1846
} | 45 |
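The selection step above — softmax over only the language-token logits, then a descending sort — can be sketched on the host without the model. The values below are made up and a numerically stable softmax is inlined:

fn main() {
    let logits = vec![1.0f32, 3.0, 0.5, 2.0];
    let languages = ["en", "zh", "de", "es"];
    // Stable softmax: subtract the max before exponentiating.
    let max = logits.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = logits.iter().map(|l| (l - max).exp()).collect();
    let sum: f32 = exps.iter().sum();
    let mut probs: Vec<_> = languages.iter().zip(exps.iter().map(|e| e / sum)).collect();
    probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
    for (language, p) in probs.iter().take(2) {
        println!("{language}: {p:.3}");
    }
}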
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#pragma once
namespace flash {
////////////////////////////////////////////////////////////////////////////////////////////////////
template<bool Varlen=true>
struct BlockInfo {
template<typename Params>
__device__ BlockInfo(const Params ¶ms, const int bidb)
: sum_s_q(!Varlen || params.cu_seqlens_q == nullptr ? -1 : params.cu_seqlens_q[bidb])
, sum_s_k(!Varlen || params.cu_seqlens_k == nullptr || !params.is_seqlens_k_cumulative ? -1 : params.cu_seqlens_k[bidb])
, actual_seqlen_q(!Varlen || params.cu_seqlens_q == nullptr ? params.seqlen_q : params.cu_seqlens_q[bidb + 1] - sum_s_q)
// If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb].
// Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K.
, leftpad_k(params.leftpad_k == nullptr ? 0 : params.leftpad_k[bidb])
, seqlen_k_cache((!Varlen || params.cu_seqlens_k == nullptr ? params.seqlen_k : (params.is_seqlens_k_cumulative ? params.cu_seqlens_k[bidb + 1] - sum_s_k : params.cu_seqlens_k[bidb])) - leftpad_k)
, actual_seqlen_k(params.seqused_k ? params.seqused_k[bidb] - leftpad_k : seqlen_k_cache + (params.knew_ptr == nullptr ? 0 : params.seqlen_knew))
{
}
template <typename index_t>
__forceinline__ __device__ index_t q_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const {
return sum_s_q == -1 ? bidb * batch_stride : uint32_t(sum_s_q) * row_stride;
}
template <typename index_t>
__forceinline__ __device__ index_t k_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const {
return sum_s_k == -1 ? bidb * batch_stride + leftpad_k * row_stride : uint32_t(sum_s_k + leftpad_k) * row_stride;
}
const int sum_s_q;
const int sum_s_k;
const int actual_seqlen_q;
// We have to have seqlen_k_cache declared before actual_seqlen_k, otherwise actual_seqlen_k is set to 0.
const int leftpad_k;
const int seqlen_k_cache;
const int actual_seqlen_k;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace flash
| candle/candle-flash-attn/kernels/block_info.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/block_info.h",
"repo_id": "candle",
"token_count": 930
} | 46 |
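The two offset helpers in `BlockInfo` encode the fixed-length vs. variable-length layout switch: `sum_s_q == -1` means the classic batched layout, otherwise `sum_s_q` counts the rows of all preceding sequences. A plain-Rust sketch of the same arithmetic, with arbitrary stride values:

// Mirrors `q_offset` above: batch-strided for fixed-length batches,
// cumulative-row-strided for varlen batches.
fn q_offset(sum_s_q: i64, batch_stride: usize, row_stride: usize, bidb: usize) -> usize {
    if sum_s_q == -1 {
        bidb * batch_stride
    } else {
        sum_s_q as usize * row_stride
    }
}

fn main() {
    // Fixed-length layout: batch entry 2 starts at 2 * batch_stride.
    assert_eq!(q_offset(-1, 4096, 64, 2), 8192);
    // Varlen layout: cu_seqlens_q says 37 rows precede this sequence.
    assert_eq!(q_offset(37, 4096, 64, 2), 37 * 64);
    println!("offsets ok");
}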
// Inspired by
// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
#pragma once
/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, [&] {
/// some_function<BoolConst>(...);
/// });
/// ```
#define BOOL_SWITCH(COND, CONST_NAME, ...) \
[&] { \
if (COND) { \
constexpr static bool CONST_NAME = true; \
return __VA_ARGS__(); \
} else { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
} \
}()
#ifdef FLASHATTENTION_DISABLE_DROPOUT
#define DROPOUT_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define DROPOUT_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_ALIBI
#define ALIBI_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define ALIBI_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_UNEVEN_K
#define EVENK_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = true; \
return __VA_ARGS__(); \
}()
#else
#define EVENK_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_SOFTCAP
#define SOFTCAP_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define SOFTCAP_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_LOCAL
#define LOCAL_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define LOCAL_SWITCH BOOL_SWITCH
#endif
#define FP16_SWITCH(COND, ...) \
[&] { \
if (COND) { \
using elem_type = cutlass::half_t; \
return __VA_ARGS__(); \
} else { \
using elem_type = cutlass::bfloat16_t; \
return __VA_ARGS__(); \
} \
}()
#define HEADDIM_SWITCH(HEADDIM, ...) \
[&] { \
if (HEADDIM <= 32) { \
constexpr static int kHeadDim = 32; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 64) { \
constexpr static int kHeadDim = 64; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 96) { \
constexpr static int kHeadDim = 96; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 128) { \
constexpr static int kHeadDim = 128; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 160) { \
constexpr static int kHeadDim = 160; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 192) { \
constexpr static int kHeadDim = 192; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 224) { \
constexpr static int kHeadDim = 224; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 256) { \
constexpr static int kHeadDim = 256; \
return __VA_ARGS__(); \
} \
}()
| candle/candle-flash-attn/kernels/static_switch.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/static_switch.h",
"repo_id": "candle",
"token_count": 2335
} | 47 |
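`HEADDIM_SWITCH` dispatches a runtime head dimension to the nearest compile-time bucket so each kernel instantiation can be specialized. The bucketing itself is a round-up search; here is a sketch of that logic in Rust, with the bucket list mirroring the macro above:

// Round a runtime head dim up to the nearest supported bucket; `None`
// corresponds to the macro falling through all branches.
fn head_dim_bucket(head_dim: usize) -> Option<usize> {
    [32usize, 64, 96, 128, 160, 192, 224, 256]
        .iter()
        .copied()
        .find(|&b| head_dim <= b)
}

fn main() {
    assert_eq!(head_dim_bucket(80), Some(96));
    assert_eq!(head_dim_bucket(256), Some(256));
    assert_eq!(head_dim_bucket(300), None);
    println!("bucketing ok");
}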
// WARNING: THIS IS ONLY VALID ASSUMING THAT inp IS CONTIGUOUS!
// TODO: proper error reporting when ids are larger than v_size.
#include "cuda_utils.cuh"
#include <stdint.h>
template <typename T>
__host__ __device__
constexpr T max_value();
template <>
__host__ __device__
constexpr int64_t max_value<int64_t>() {
return 0x7FFFFFFFFFFFFFFFLL;
}
template <>
__host__ __device__
constexpr uint32_t max_value<uint32_t>() {
return 0xFFFFFFFFu;
}
template <>
__host__ __device__
constexpr uint8_t max_value<uint8_t>() {
return 0xFFu;
}
template <>
__host__ __device__
constexpr int32_t max_value<int32_t>() {
return 0x7FFFFFFF;
}
template <>
__host__ __device__
constexpr int16_t max_value<int16_t>() {
return 0x7FFF;
}
template<typename T, typename I>
__device__ void index_select(
const size_t numel,
const size_t num_dims,
const size_t *info,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
bool b = is_contiguous(num_dims, dims, strides);
for (unsigned int dst_i = blockIdx.x * blockDim.x + threadIdx.x; dst_i < numel; dst_i += blockDim.x * gridDim.x) {
unsigned int left_i = dst_i / (ids_dim_size * right_size);
unsigned int id_i = dst_i / right_size % ids_dim_size;
unsigned int right_i = dst_i % right_size;
if (ids[id_i] == max_value<I>()) {
out[dst_i] = static_cast<T>(0);
} else {
assert(ids[id_i] < src_dim_size);
unsigned int src_i = left_i * (src_dim_size * right_size) + ids[id_i] * right_size + right_i;
unsigned strided_i = b ? src_i : get_strided_index(src_i, num_dims, dims, strides);
out[dst_i] = inp[strided_i];
}
}
}
#define IS_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { index_select(numel, num_dims, info, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
template<typename T, typename I>
__device__ void gather(
const size_t numel,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
size_t post = i % right_size;
const I idx = ids[i];
if (ids[i] == max_value<I>()) {
out[i] = static_cast<T>(0);
} else {
assert(idx < src_dim_size);
size_t pre = i / (right_size * ids_dim_size);
size_t src_i = (pre * src_dim_size + idx) * right_size + post;
out[i] = inp[src_i];
}
}
}
#define GATHER_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { gather(numel, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
template<typename T, typename I>
__device__ void index_add(
const I *ids,
const size_t ids_dim_size,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const I idx = ids[j];
const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
if (idx < max_value<I>()) {
assert(idx < dst_dim_size);
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
}
#if __CUDA_ARCH__ >= 890
#define F8E4M3_TO_FLOAT(x) __half2float(__nv_cvt_fp8_to_halfraw(x.__x, __NV_E4M3))
template<typename I>
__device__ void scatter_add_f8(
const I *ids,
const __nv_fp8_e4m3 *inp,
__nv_fp8_e4m3 *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const size_t idx = ids[src_i];
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] = __nv_fp8_e4m3(F8E4M3_TO_FLOAT(out[dst_i]) + F8E4M3_TO_FLOAT(inp[src_i]));
}
}
}
template<typename I>
__device__ void index_add_f8(
const I *ids,
const size_t ids_dim_size,
const __nv_fp8_e4m3 *inp,
__nv_fp8_e4m3 *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const size_t idx = ids[j];
const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] = __nv_fp8_e4m3(F8E4M3_TO_FLOAT(out[dst_i]) + F8E4M3_TO_FLOAT(inp[src_i]));
}
}
}
#endif
#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const size_t ids_dim_size, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { index_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#define IA_OP_F8(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const size_t ids_dim_size, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { index_add_f8(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
template<typename T, typename I>
__device__ void scatter(
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const I idx = ids[src_i];
if (idx < max_value<I>()) {
assert(idx < dst_dim_size);
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] = inp[src_i];
}
}
}
}
template<typename T, typename I>
__device__ void scatter_add(
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const I idx = ids[src_i];
if (idx < max_value<I>()) {
assert(idx < dst_dim_size);
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
}
#define S_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#define SA_OP_F8(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter_add_f8(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#if __CUDA_ARCH__ >= 800
IS_OP(__nv_bfloat16, int64_t, is_i64_bf16)
IS_OP(__nv_bfloat16, uint32_t, is_u32_bf16)
IS_OP(__nv_bfloat16, uint8_t, is_u8_bf16)
GATHER_OP(__nv_bfloat16, int64_t, gather_i64_bf16)
GATHER_OP(__nv_bfloat16, uint32_t, gather_u32_bf16)
GATHER_OP(__nv_bfloat16, uint8_t, gather_u8_bf16)
IA_OP(__nv_bfloat16, int64_t, ia_i64_bf16)
IA_OP(__nv_bfloat16, uint32_t, ia_u32_bf16)
IA_OP(__nv_bfloat16, uint8_t, ia_u8_bf16)
SA_OP(__nv_bfloat16, int64_t, sa_i64_bf16)
SA_OP(__nv_bfloat16, uint32_t, sa_u32_bf16)
SA_OP(__nv_bfloat16, uint8_t, sa_u8_bf16)
S_OP(__nv_bfloat16, int64_t, s_i64_bf16)
S_OP(__nv_bfloat16, uint32_t, s_u32_bf16)
S_OP(__nv_bfloat16, uint8_t, s_u8_bf16)
#endif
#if __CUDA_ARCH__ >= 890
IS_OP(__nv_fp8_e4m3, int16_t, is_i16_f8_e4m3)
IS_OP(__nv_fp8_e4m3, int32_t, is_i32_f8_e4m3)
IS_OP(__nv_fp8_e4m3, int64_t, is_i64_f8_e4m3)
IS_OP(__nv_fp8_e4m3, uint32_t, is_u32_f8_e4m3)
IS_OP(__nv_fp8_e4m3, uint8_t, is_u8_f8_e4m3)
GATHER_OP(__nv_fp8_e4m3, int16_t, gather_i16_f8_e4m3)
GATHER_OP(__nv_fp8_e4m3, int32_t, gather_i32_f8_e4m3)
GATHER_OP(__nv_fp8_e4m3, int64_t, gather_i64_f8_e4m3)
GATHER_OP(__nv_fp8_e4m3, uint32_t, gather_u32_f8_e4m3)
GATHER_OP(__nv_fp8_e4m3, uint8_t, gather_u8_f8_e4m3)
IA_OP_F8(__nv_fp8_e4m3, int16_t, ia_i16_f8_e4m3)
IA_OP_F8(__nv_fp8_e4m3, int32_t, ia_i32_f8_e4m3)
IA_OP_F8(__nv_fp8_e4m3, int64_t, ia_i64_f8_e4m3)
IA_OP_F8(__nv_fp8_e4m3, uint32_t, ia_u32_f8_e4m3)
IA_OP_F8(__nv_fp8_e4m3, uint8_t, ia_u8_f8_e4m3)
SA_OP_F8(__nv_fp8_e4m3, int16_t, sa_i16_f8_e4m3)
SA_OP_F8(__nv_fp8_e4m3, int32_t, sa_i32_f8_e4m3)
SA_OP_F8(__nv_fp8_e4m3, int64_t, sa_i64_f8_e4m3)
SA_OP_F8(__nv_fp8_e4m3, uint32_t, sa_u32_f8_e4m3)
SA_OP_F8(__nv_fp8_e4m3, uint8_t, sa_u8_f8_e4m3)
#endif
#if __CUDA_ARCH__ >= 530
IS_OP(__half, int64_t, is_i64_f16)
IS_OP(__half, uint32_t, is_u32_f16)
IS_OP(__half, uint8_t, is_u8_f16)
GATHER_OP(__half, int64_t, gather_i64_f16)
GATHER_OP(__half, uint32_t, gather_u32_f16)
GATHER_OP(__half, uint8_t, gather_u8_f16)
IA_OP(__half, int64_t, ia_i64_f16)
IA_OP(__half, uint32_t, ia_u32_f16)
IA_OP(__half, uint8_t, ia_u8_f16)
SA_OP(__half, int64_t, sa_i64_f16)
SA_OP(__half, uint32_t, sa_u32_f16)
SA_OP(__half, uint8_t, sa_u8_f16)
S_OP(__half, int64_t, s_i64_f16)
S_OP(__half, uint32_t, s_u32_f16)
S_OP(__half, uint8_t, s_u8_f16)
#endif
IS_OP(float, int64_t, is_i64_f32)
IS_OP(double, int64_t, is_i64_f64)
IS_OP(uint8_t, int64_t, is_i64_u8)
IS_OP(uint32_t, int64_t, is_i64_u32)
IS_OP(int64_t, int64_t, is_i64_i64)
IS_OP(float, uint32_t, is_u32_f32)
IS_OP(double, uint32_t, is_u32_f64)
IS_OP(uint8_t, uint32_t, is_u32_u8)
IS_OP(int64_t, uint32_t, is_u32_i64)
IS_OP(uint32_t, uint32_t, is_u32_u32)
IS_OP(float, uint8_t, is_u8_f32)
IS_OP(double, uint8_t, is_u8_f64)
IS_OP(uint8_t, uint8_t, is_u8_u8)
IS_OP(uint32_t, uint8_t, is_u8_u32)
IS_OP(int64_t, uint8_t, is_u8_i64)
GATHER_OP(float, int64_t, gather_i64_f32)
GATHER_OP(double, int64_t, gather_i64_f64)
GATHER_OP(uint8_t, int64_t, gather_i64_u8)
GATHER_OP(uint32_t, int64_t, gather_i64_u32)
GATHER_OP(int64_t, int64_t, gather_i64_i64)
GATHER_OP(float, uint32_t, gather_u32_f32)
GATHER_OP(double, uint32_t, gather_u32_f64)
GATHER_OP(uint8_t, uint32_t, gather_u32_u8)
GATHER_OP(int64_t, uint32_t, gather_u32_i64)
GATHER_OP(uint32_t, uint32_t, gather_u32_u32)
GATHER_OP(float, uint8_t, gather_u8_f32)
GATHER_OP(double, uint8_t, gather_u8_f64)
GATHER_OP(uint8_t, uint8_t, gather_u8_u8)
GATHER_OP(uint32_t, uint8_t, gather_u8_u32)
GATHER_OP(int64_t, uint8_t, gather_u8_i64)
IA_OP(float, int64_t, ia_i64_f32)
IA_OP(double, int64_t, ia_i64_f64)
IA_OP(uint8_t, int64_t, ia_i64_u8)
IA_OP(int64_t, int64_t, ia_i64_i64)
IA_OP(uint32_t, int64_t, ia_i64_u32)
IA_OP(float, uint32_t, ia_u32_f32)
IA_OP(double, uint32_t, ia_u32_f64)
IA_OP(uint8_t, uint32_t, ia_u32_u8)
IA_OP(int64_t, uint32_t, ia_u32_i64)
IA_OP(uint32_t, uint32_t, ia_u32_u32)
IA_OP(float, uint8_t, ia_u8_f32)
IA_OP(double, uint8_t, ia_u8_f64)
IA_OP(uint8_t, uint8_t, ia_u8_u8)
IA_OP(uint32_t, uint8_t, ia_u8_u32)
IA_OP(int64_t, uint8_t, ia_u8_i64)
SA_OP(float, int64_t, sa_i64_f32)
SA_OP(double, int64_t, sa_i64_f64)
SA_OP(uint8_t, int64_t, sa_i64_u8)
SA_OP(int64_t, int64_t, sa_i64_i64)
SA_OP(uint32_t, int64_t, sa_i64_u32)
SA_OP(float, uint32_t, sa_u32_f32)
SA_OP(double, uint32_t, sa_u32_f64)
SA_OP(uint8_t, uint32_t, sa_u32_u8)
SA_OP(int64_t, uint32_t, sa_u32_i64)
SA_OP(uint32_t, uint32_t, sa_u32_u32)
SA_OP(float, uint8_t, sa_u8_f32)
SA_OP(double, uint8_t, sa_u8_f64)
SA_OP(uint8_t, uint8_t, sa_u8_u8)
SA_OP(uint32_t, uint8_t, sa_u8_u32)
SA_OP(int64_t, uint8_t, sa_u8_i64)
S_OP(float, int64_t, s_i64_f32)
S_OP(double, int64_t, s_i64_f64)
S_OP(uint8_t, int64_t, s_i64_u8)
S_OP(int64_t, int64_t, s_i64_i64)
S_OP(uint32_t, int64_t, s_i64_u32)
S_OP(float, uint32_t, s_u32_f32)
S_OP(double, uint32_t, s_u32_f64)
S_OP(uint8_t, uint32_t, s_u32_u8)
S_OP(int64_t, uint32_t, s_u32_i64)
S_OP(uint32_t, uint32_t, s_u32_u32)
S_OP(float, uint8_t, s_u8_f32)
S_OP(double, uint8_t, s_u8_f64)
S_OP(uint8_t, uint8_t, s_u8_u8)
S_OP(uint32_t, uint8_t, s_u8_u32)
S_OP(int64_t, uint8_t, s_u8_i64)
| candle/candle-kernels/src/indexing.cu/0 | {
"file_path": "candle/candle-kernels/src/indexing.cu",
"repo_id": "candle",
"token_count": 8193
} | 48 |
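The kernels above all share the same flat-index decomposition: an output offset splits into `(left, id, right)` coordinates, and the sentinel id `max_value<I>()` marks padding that reads back as zero. Below is a CPU reference of `index_select` for a contiguous source, useful for checking that arithmetic; the shapes and values are made up:

// CPU sketch of the `index_select` kernel above (contiguous case only).
fn index_select(inp: &[f32], ids: &[u32], src_dim: usize, right: usize) -> Vec<f32> {
    let left = inp.len() / (src_dim * right);
    let mut out = vec![0f32; left * ids.len() * right];
    for dst_i in 0..out.len() {
        // Decompose the flat destination index into (left, id, right).
        let left_i = dst_i / (ids.len() * right);
        let id_i = dst_i / right % ids.len();
        let right_i = dst_i % right;
        if ids[id_i] == u32::MAX {
            out[dst_i] = 0.0; // padding sentinel reads as zero
        } else {
            let src_i = left_i * (src_dim * right) + ids[id_i] as usize * right + right_i;
            out[dst_i] = inp[src_i];
        }
    }
    out
}

fn main() {
    // A 1 x 3 x 2 source; select rows [2, sentinel] along the middle dim.
    let inp = [0f32, 1., 10., 11., 20., 21.];
    let out = index_select(&inp, &[2, u32::MAX], 3, 2);
    assert_eq!(out, vec![20., 21., 0., 0.]);
    println!("{out:?}");
}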
#include <metal_stdlib>
using namespace metal;
template <typename T>
inline T max_value();
template <>
inline int64_t max_value<int64_t>() {
return 0x7FFFFFFFFFFFFFFF;
}
template <>
inline uint32_t max_value<uint32_t>() {
return 0xFFFFFFFFu;
}
template <>
inline uint8_t max_value<uint8_t>() {
return 0xFF;
}
METAL_FUNC uint get_strided_index(
uint idx,
constant size_t &num_dims,
constant size_t *dims,
constant size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &ids_size,
constant bool &contiguous,
constant size_t *src_dims,
constant size_t *src_strides,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t id_i = (tid / right_size) % ids_size;
if (input_ids[id_i] == max_value<INDEX_TYPENAME>()) {
output[tid] = static_cast<TYPENAME>(0);
} else {
const INDEX_TYPENAME input_i = min(input_ids[id_i], (INDEX_TYPENAME)(src_dim_size - 1));
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
    /*
    // The `min` above clamps the id to force-prevent out-of-bounds indexing,
    // since there doesn't seem to be a good way to force a crash from a Metal
    // kernel. No need to check for zero, we're only allowing unsized.
    */
const size_t src_i = left_rank_i * src_dim_size * right_size + input_i * right_size + right_rank_i;
const size_t strided_src_i = contiguous ? src_i : get_strided_index(src_i, src_dim_size, src_dims, src_strides);
output[tid] = input[strided_src_i];
}
}
# define INDEX_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &ids_size, \
constant bool &contiguous, \
constant size_t *src_dims, \
constant size_t *src_strides, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
index<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, contiguous, src_dims, src_strides, input, input_ids, output, tid); \
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void gather(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &ids_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const INDEX_TYPENAME input_i = input_ids[tid];
if (input_i == max_value<INDEX_TYPENAME>()) {
output[tid] = static_cast<TYPENAME>(0);
} else {
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size / ids_size;
const size_t src_i = (left_rank_i * src_dim_size + input_i) * right_size + right_rank_i;
output[tid] = input[src_i];
}
}
# define GATHER_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &ids_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
gather<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, ids_size, input, input_ids, output, tid); \
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void scatter(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const INDEX_TYPENAME idx = input_ids[src_i];
if (idx < max_value<INDEX_TYPENAME>()) {
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] = input[src_i];
}
}
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void scatter_add(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const INDEX_TYPENAME idx = input_ids[src_i];
if (idx < max_value<INDEX_TYPENAME>()) {
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
}
}
}
# define SCATTER_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
scatter<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \
}
# define SCATTER_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
scatter_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, input, input_ids, output, tid); \
}
template<typename TYPENAME, typename INDEX_TYPENAME>
METAL_FUNC void index_add(
constant size_t &dst_size,
constant size_t &left_size,
constant size_t &src_dim_size,
constant size_t &right_size,
constant size_t &dst_dim_size,
constant size_t &ids_dim_size,
const device TYPENAME *input,
const device INDEX_TYPENAME *input_ids,
device TYPENAME *output,
uint tid [[ thread_position_in_grid ]]
) {
if (tid >= dst_size) {
return;
}
const size_t right_rank_i = tid % right_size;
const size_t left_rank_i = tid / right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const INDEX_TYPENAME idx = input_ids[j];
if (idx < max_value<INDEX_TYPENAME>()) {
const size_t src_i = (left_rank_i * src_dim_size + j) * right_size + right_rank_i;
const size_t dst_i = (left_rank_i * dst_dim_size + idx) * right_size + right_rank_i;
output[dst_i] += input[src_i];
}
}
}
# define INDEX_ADD_OP(NAME, INDEX_TYPENAME, TYPENAME) \
kernel void NAME( \
constant size_t &dst_size, \
constant size_t &left_size, \
constant size_t &src_dim_size, \
constant size_t &right_size, \
constant size_t &dst_dim_size, \
constant size_t &ids_dim_size, \
const device TYPENAME *input, \
const device INDEX_TYPENAME *input_ids, \
device TYPENAME *output, \
uint tid [[ thread_position_in_grid ]] \
) { \
index_add<TYPENAME, INDEX_TYPENAME>(dst_size, left_size, src_dim_size, right_size, dst_dim_size, ids_dim_size, input, input_ids, output, tid); \
}
INDEX_OP(is_i64_f32, int64_t, float)
INDEX_OP(is_i64_f16, int64_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_i64_bf16, int64_t, bfloat)
#endif
INDEX_OP(is_u32_u8, uint32_t, uint8_t)
INDEX_OP(is_u32_u32, uint32_t, uint32_t)
INDEX_OP(is_u32_f32, uint32_t, float)
INDEX_OP(is_u32_f16, uint32_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_u32_bf16, uint32_t, bfloat)
#endif
INDEX_OP(is_u8_u8, uint8_t, uint8_t)
INDEX_OP(is_u8_u32, uint8_t, uint32_t)
INDEX_OP(is_u8_f32, uint8_t, float)
INDEX_OP(is_u8_f16, uint8_t, half)
#if defined(__HAVE_BFLOAT__)
INDEX_OP(is_u8_bf16, uint8_t, bfloat)
#endif
GATHER_OP(gather_i64_f32, int64_t, float)
GATHER_OP(gather_i64_f16, int64_t, half)
GATHER_OP(gather_u32_f32, uint, float)
GATHER_OP(gather_u32_f16, uint, half)
#if defined(__HAVE_BFLOAT__)
GATHER_OP(gather_i64_bf16, int64_t, bfloat)
GATHER_OP(gather_u32_bf16, uint, bfloat)
#endif
GATHER_OP(gather_i64_u32, int64_t, uint)
GATHER_OP(gather_u32_u32, uint, uint)
GATHER_OP(gather_i64_i64, int64_t, int64_t)
GATHER_OP(gather_u32_i64, uint, int64_t)
SCATTER_ADD_OP(sa_u32_f32, uint32_t, float)
SCATTER_ADD_OP(sa_u8_f32, uint8_t, float)
SCATTER_ADD_OP(sa_i64_f32, int64_t, float)
SCATTER_ADD_OP(sa_u32_u32, uint32_t, uint32_t)
SCATTER_ADD_OP(sa_u32_f16, uint32_t, half)
SCATTER_ADD_OP(sa_u8_f16, uint8_t, half)
SCATTER_ADD_OP(sa_i64_f16, int64_t, half)
#if defined(__HAVE_BFLOAT__)
SCATTER_ADD_OP(sa_u32_bf16, uint32_t, bfloat)
SCATTER_ADD_OP(sa_u8_bf16, uint8_t, bfloat)
SCATTER_ADD_OP(sa_i64_bf16, int64_t, bfloat)
#endif
SCATTER_OP(s_u32_f32, uint32_t, float)
SCATTER_OP(s_u8_f32, uint8_t, float)
SCATTER_OP(s_i64_f32, int64_t, float)
SCATTER_OP(s_u32_u32, uint32_t, uint32_t)
SCATTER_OP(s_u32_f16, uint32_t, half)
SCATTER_OP(s_u8_f16, uint8_t, half)
SCATTER_OP(s_i64_f16, int64_t, half)
#if defined(__HAVE_BFLOAT__)
SCATTER_OP(s_u32_bf16, uint32_t, bfloat)
SCATTER_OP(s_u8_bf16, uint8_t, bfloat)
SCATTER_OP(s_i64_bf16, int64_t, bfloat)
#endif
// i64
INDEX_ADD_OP(ia_i64_f16, int64_t, half)
INDEX_ADD_OP(ia_i64_f32, int64_t, float)
INDEX_ADD_OP(ia_i64_i64, int64_t, int64_t)
INDEX_ADD_OP(ia_i64_u32, int64_t, uint32_t)
INDEX_ADD_OP(ia_i64_u8, int64_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_i64_bf16, int64_t, bfloat)
#endif
// u32
INDEX_ADD_OP(ia_u32_f16, uint32_t, half)
INDEX_ADD_OP(ia_u32_f32, uint32_t, float)
INDEX_ADD_OP(ia_u32_i64, uint32_t, int64_t)
INDEX_ADD_OP(ia_u32_u32, uint32_t, uint32_t)
INDEX_ADD_OP(ia_u32_u8, uint32_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_u32_bf16, uint32_t, bfloat)
#endif
// u8
INDEX_ADD_OP(ia_u8_f16, uint8_t, half)
INDEX_ADD_OP(ia_u8_f32, uint8_t, float)
INDEX_ADD_OP(ia_u8_i64, uint8_t, int64_t)
INDEX_ADD_OP(ia_u8_u32, uint8_t, uint32_t)
INDEX_ADD_OP(ia_u8_u8, uint8_t, uint8_t)
#if defined(__HAVE_BFLOAT__)
INDEX_ADD_OP(ia_u8_bf16, uint8_t, bfloat)
#endif
| candle/candle-metal-kernels/src/indexing.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/indexing.metal",
"repo_id": "candle",
"token_count": 5472
} | 49 |
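In the scatter kernels above, each thread owns one `(left, right)` destination column and loops over every source position, skipping the max-value sentinel. A CPU reference of `scatter_add` under the same conventions, with made-up sizes:

// CPU sketch of `scatter_add`: each source position j adds into the
// destination row named by ids at that position; the sentinel is skipped.
fn scatter_add(out: &mut [f32], inp: &[f32], ids: &[u32], src_dim: usize, dst_dim: usize, right: usize) {
    let left = inp.len() / (src_dim * right);
    for pre in 0..left {
        for j in 0..src_dim {
            for post in 0..right {
                let src_i = (pre * src_dim + j) * right + post;
                if ids[src_i] != u32::MAX {
                    let idx = ids[src_i] as usize;
                    out[(pre * dst_dim + idx) * right + post] += inp[src_i];
                }
            }
        }
    }
}

fn main() {
    let mut out = vec![0f32; 4]; // dst_dim = 2, right = 2
    let inp = [1f32, 2., 3., 4.]; // src_dim = 2
    let ids = [0u32, 0, 0, 0]; // both source rows accumulate into dst row 0
    scatter_add(&mut out, &inp, &ids, 2, 2, 2);
    assert_eq!(out, vec![4., 6., 0., 0.]);
    println!("{out:?}");
}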
use candle_metal_kernels::{call_affine, Kernels};
use metal::objc::rc::autoreleasepool;
use metal::{Device, MTLResourceOptions};
use rand;
use std::any::type_name;
use std::time::Instant;
fn main() {
let device = Device::system_default().unwrap();
let kernels = Kernels::new();
let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>();
let f32_10k = (0..10000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
let f32_100k = (0..100000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}",
"dtype", "kernel", "size", "runs", "total time", "avg time"
);
// f32
run_affine_bench(&device, &kernels, &f32_1k);
run_affine_bench(&device, &kernels, &f32_10k);
run_affine_bench(&device, &kernels, &f32_100k);
}
fn run_affine_bench<T: Clone>(device: &Device, kernels: &Kernels, v: &[T]) {
let command_queue = device.new_command_queue();
let options = MTLResourceOptions::StorageModeManaged;
let iterations = 10000;
let input = device.new_buffer_with_data(
v.as_ptr() as *const core::ffi::c_void,
core::mem::size_of_val(v) as u64,
options,
);
let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options);
let mul: f32 = 1.2345;
let add: f32 = 2.3456;
let total_time = autoreleasepool(|| {
let command_buffer = command_queue.new_command_buffer();
let start = Instant::now();
for _ in 0..iterations {
call_affine(
&device,
command_buffer,
&kernels,
"affine_float",
v.len(),
&input,
&mut output,
mul,
add,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
start.elapsed()
});
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
type_name::<T>().split("::").last().unwrap(),
"affine",
v.len(),
iterations,
total_time,
total_time / iterations
);
}
| candle/candle-metal-kernels/tmp/affine.rs/0 | {
"file_path": "candle/candle-metal-kernels/tmp/affine.rs",
"repo_id": "candle",
"token_count": 1154
} | 50 |
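For reference, the benchmarked `affine` kernel computes `y[i] = x[i] * mul + add` elementwise; a scalar CPU version makes the expected output easy to verify:

// Scalar reference for the affine op exercised by the benchmark above.
fn affine(xs: &[f32], mul: f32, add: f32) -> Vec<f32> {
    xs.iter().map(|x| x * mul + add).collect()
}

fn main() {
    let ys = affine(&[0.0, 1.0, 2.0], 1.2345, 2.3456);
    println!("{ys:?}"); // [2.3456, 3.5801, 4.8146]
}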
//! Embedding Layer.
use candle::{Result, Tensor};
#[derive(Clone, Debug)]
pub struct Embedding {
embeddings: Tensor,
hidden_size: usize,
}
impl Embedding {
pub fn new(embeddings: Tensor, hidden_size: usize) -> Self {
Self {
embeddings,
hidden_size,
}
}
pub fn embeddings(&self) -> &Tensor {
&self.embeddings
}
/// Get the hidden size of the embedding matrix
pub fn hidden_size(&self) -> usize {
self.hidden_size
}
}
impl crate::Module for Embedding {
fn forward(&self, indexes: &Tensor) -> Result<Tensor> {
let mut final_dims = indexes.dims().to_vec();
final_dims.push(self.hidden_size);
let indexes = indexes.flatten_all()?;
let values = self.embeddings.index_select(&indexes, 0)?;
let values = values.reshape(final_dims)?;
Ok(values)
}
}
pub fn embedding(in_size: usize, out_size: usize, vb: crate::VarBuilder) -> Result<Embedding> {
let embeddings = vb.get_with_hints(
(in_size, out_size),
"weight",
crate::Init::Randn {
mean: 0.,
stdev: 1.,
},
)?;
Ok(Embedding::new(embeddings, out_size))
}
| candle/candle-nn/src/embedding.rs/0 | {
"file_path": "candle/candle-nn/src/embedding.rs",
"repo_id": "candle",
"token_count": 571
} | 51 |
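`Embedding::forward` is a gather: flatten the index tensor, pick rows of the embedding matrix, then reshape back with `hidden_size` appended. A CPU sketch of the row lookup with a tiny made-up table:

// Each index selects one row of the embedding table, exactly what the
// `index_select` call in `forward` does along dim 0.
fn embedding_lookup(table: &[Vec<f32>], indexes: &[u32]) -> Vec<Vec<f32>> {
    indexes.iter().map(|&i| table[i as usize].clone()).collect()
}

fn main() {
    let table = vec![vec![0.0, 0.1], vec![1.0, 1.1], vec![2.0, 2.1]];
    let out = embedding_lookup(&table, &[2, 0, 2]);
    assert_eq!(out[0], vec![2.0, 2.1]);
    println!("{out:?}");
}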
//! A `VarBuilder` for variable retrieval from models
//!
//! A `VarBuilder` is used to retrieve variables used by a model. These variables can either come
//! from a pre-trained checkpoint, e.g. using `VarBuilder::from_mmaped_safetensors`, or initialized
//! for training, e.g. using `VarBuilder::from_varmap`.
use crate::VarMap;
use candle::{safetensors::Load, DType, Device, Error, Result, Shape, Tensor};
use safetensors::{slice::IndexOp, tensor::SafeTensors};
use std::collections::HashMap;
use std::sync::Arc;
/// A structure used to retrieve variables, these variables can either come from storage or be
/// generated via some form of initialization.
///
/// The way to retrieve variables is defined in the backend embedded in the `VarBuilder`.
pub struct VarBuilderArgs<'a, B: Backend> {
data: Arc<TensorData<B>>,
path: Vec<String>,
pub dtype: DType,
_phantom: std::marker::PhantomData<&'a B>,
}
impl<B: Backend> Clone for VarBuilderArgs<'_, B> {
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
path: self.path.clone(),
dtype: self.dtype,
_phantom: self._phantom,
}
}
}
/// A simple `VarBuilder`, this is less generic than `VarBuilderArgs` but should cover most common
/// use cases.
pub type VarBuilder<'a> = VarBuilderArgs<'a, Box<dyn SimpleBackend + 'a>>;
struct TensorData<B: Backend> {
backend: B,
pub device: Device,
}
/// A trait that defines how tensor data is retrieved.
///
/// Typically this would use disk storage in some specific format, or random initialization.
/// Note that there is a specialized version of this trait (`SimpleBackend`) that can be used most
/// of the time. The main restriction is that it doesn't allow for specific args (besides
/// initialization hints).
pub trait Backend: Send + Sync {
type Hints: Default;
/// Retrieve a tensor with some target shape.
fn get(
&self,
s: Shape,
name: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor>;
fn contains_tensor(&self, name: &str) -> bool;
}
pub trait SimpleBackend: Send + Sync {
/// Retrieve a tensor based on a target name and shape.
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor>;
fn contains_tensor(&self, name: &str) -> bool;
}
impl Backend for Box<dyn SimpleBackend + '_> {
type Hints = crate::Init;
fn get(
&self,
s: Shape,
name: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
self.as_ref().get(s, name, h, dtype, dev)
}
fn contains_tensor(&self, name: &str) -> bool {
self.as_ref().contains_tensor(name)
}
}
impl<B: Backend> VarBuilderArgs<'_, B> {
pub fn new_with_args(backend: B, dtype: DType, dev: &Device) -> Self {
let data = TensorData {
backend,
device: dev.clone(),
};
Self {
data: Arc::new(data),
path: vec![],
dtype,
_phantom: std::marker::PhantomData,
}
}
/// Returns the prefix of the `VarBuilder`.
pub fn prefix(&self) -> String {
self.path.join(".")
}
/// Returns a new `VarBuilder` using the root path.
pub fn root(&self) -> Self {
Self {
data: self.data.clone(),
path: vec![],
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
/// Returns a new `VarBuilder` with the prefix set to `prefix`.
pub fn set_prefix(&self, prefix: impl ToString) -> Self {
Self {
data: self.data.clone(),
path: vec![prefix.to_string()],
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
    /// Return a new `VarBuilder` adding `s` to the current prefix. This can be thought of as
    /// `cd`-ing into a directory.
pub fn push_prefix<S: ToString>(&self, s: S) -> Self {
let mut path = self.path.clone();
path.push(s.to_string());
Self {
data: self.data.clone(),
path,
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
/// Short alias for `push_prefix`.
pub fn pp<S: ToString>(&self, s: S) -> Self {
self.push_prefix(s)
}
/// The device used by default.
pub fn device(&self) -> &Device {
&self.data.device
}
/// The dtype used by default.
pub fn dtype(&self) -> DType {
self.dtype
}
/// Clone the VarBuilder tweaking its dtype
pub fn to_dtype(&self, dtype: DType) -> Self {
Self {
data: self.data.clone(),
path: self.path.clone(),
dtype,
_phantom: std::marker::PhantomData,
}
}
fn path(&self, tensor_name: &str) -> String {
if self.path.is_empty() {
tensor_name.to_string()
} else {
[&self.path.join("."), tensor_name].join(".")
}
}
/// This returns true only if a tensor with the passed in name is available. E.g. when passed
/// `a`, true is returned if `prefix.a` exists but false is returned if only `prefix.a.b`
/// exists.
pub fn contains_tensor(&self, tensor_name: &str) -> bool {
let path = self.path(tensor_name);
self.data.backend.contains_tensor(&path)
}
/// Retrieve the tensor associated with the given name at the current path.
pub fn get_with_hints<S: Into<Shape>>(
&self,
s: S,
name: &str,
hints: B::Hints,
) -> Result<Tensor> {
self.get_with_hints_dtype(s, name, hints, self.dtype)
}
/// Retrieve the tensor associated with the given name at the current path.
pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Tensor> {
self.get_with_hints(s, name, Default::default())
}
/// Retrieve the tensor associated with the given name & dtype at the current path.
pub fn get_with_hints_dtype<S: Into<Shape>>(
&self,
s: S,
name: &str,
hints: B::Hints,
dtype: DType,
) -> Result<Tensor> {
let path = self.path(name);
self.data
.backend
.get(s.into(), &path, hints, dtype, &self.data.device)
}
}
struct Zeros;
impl SimpleBackend for Zeros {
fn get(&self, s: Shape, _: &str, _: crate::Init, dtype: DType, dev: &Device) -> Result<Tensor> {
Tensor::zeros(s, dtype, dev)
}
fn contains_tensor(&self, _name: &str) -> bool {
true
}
}
impl SimpleBackend for HashMap<String, Tensor> {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self
.get(name)
.ok_or_else(|| {
Error::CannotFindTensor {
path: name.to_string(),
}
.bt()
})?
.clone();
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
tensor.to_device(dev)?.to_dtype(dtype)
}
fn contains_tensor(&self, name: &str) -> bool {
self.contains_key(name)
}
}
impl SimpleBackend for VarMap {
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
VarMap::get(self, s, name, h, dtype, dev)
}
fn contains_tensor(&self, name: &str) -> bool {
self.data().lock().unwrap().contains_key(name)
}
}
#[allow(dead_code)]
pub struct SafeTensorWithRouting<'a> {
routing: HashMap<String, usize>,
safetensors: Vec<SafeTensors<'a>>,
}
impl SimpleBackend for SafeTensorWithRouting<'_> {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let index = self.routing.get(path).ok_or_else(|| {
Error::CannotFindTensor {
path: path.to_string(),
}
.bt()
})?;
let tensor = self.safetensors[*index]
.tensor(path)?
.load(dev)?
.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.routing.contains_key(name)
}
}
impl SimpleBackend for candle::npy::NpzTensors {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = match self.get(path)? {
None => Err(Error::CannotFindTensor {
path: path.to_string(),
}
.bt())?,
Some(tensor) => tensor,
};
let tensor = tensor.to_device(dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok_and(|v| v.is_some())
}
}
impl SimpleBackend for candle::pickle::PthTensors {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = match self.get(path)? {
None => Err(Error::CannotFindTensor {
path: path.to_string(),
}
.bt())?,
Some(tensor) => tensor,
};
let tensor = tensor.to_device(dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok_and(|v| v.is_some())
}
}
impl SimpleBackend for candle::safetensors::MmapedSafetensors {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl SimpleBackend for candle::safetensors::BufferedSafetensors {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl SimpleBackend for candle::safetensors::SliceSafetensors<'_> {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl<'a> VarBuilder<'a> {
/// Initializes a `VarBuilder` using a custom backend.
///
/// It is preferred to use one of the more specific constructors. This
/// constructor is provided to allow downstream users to define their own
/// backends.
pub fn from_backend(
backend: Box<dyn SimpleBackend + 'a>,
dtype: DType,
device: Device,
) -> Self {
let data = TensorData { backend, device };
Self {
data: Arc::new(data),
path: vec![],
dtype,
_phantom: std::marker::PhantomData,
}
}
/// Initializes a `VarBuilder` that uses zeros for any tensor.
pub fn zeros(dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(Zeros), dtype, dev.clone())
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a hashtable. An error is
/// returned if no tensor is available under the requested path or on shape mismatches.
pub fn from_tensors(ts: HashMap<String, Tensor>, dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(ts), dtype, dev.clone())
}
/// Initializes a `VarBuilder` using a `VarMap`. The requested tensors are created and
/// initialized on new paths, the same tensor is used if the same path is requested multiple
/// times. This is commonly used when initializing a model before training.
///
/// Note that it is possible to load the tensor values after model creation using the `load`
/// method on `varmap`, this can be used to start model training from an existing checkpoint.
pub fn from_varmap(varmap: &VarMap, dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(varmap.clone()), dtype, dev.clone())
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors
/// files.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn from_mmaped_safetensors<P: AsRef<std::path::Path>>(
paths: &[P],
dtype: DType,
dev: &Device,
) -> Result<Self> {
let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` from a binary buffer in the safetensor format.
pub fn from_buffered_safetensors(data: Vec<u8>, dtype: DType, dev: &Device) -> Result<Self> {
let tensors = candle::safetensors::BufferedSafetensors::new(data)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` from a binary slice in the safetensor format.
pub fn from_slice_safetensors(data: &'a [u8], dtype: DType, dev: &Device) -> Result<Self> {
let tensors = candle::safetensors::SliceSafetensors::new(data)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a numpy npz file.
pub fn from_npz<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> {
let npz = candle::npy::NpzTensors::new(p)?;
Ok(Self::from_backend(Box::new(npz), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a pytorch pth file.
pub fn from_pth<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> {
let pth = candle::pickle::PthTensors::new(p, None)?;
Ok(Self::from_backend(Box::new(pth), dtype, dev.clone()))
}
    /// Initializes a `VarBuilder` that retrieves tensors stored in a pytorch pth file.
    /// Similar to [`from_pth`] but additionally requires a `state_key`.
pub fn from_pth_with_state<P: AsRef<std::path::Path>>(
p: P,
dtype: DType,
state_key: &str,
dev: &Device,
) -> Result<Self> {
let pth = candle::pickle::PthTensors::new(p, Some(state_key))?;
Ok(Self::from_backend(Box::new(pth), dtype, dev.clone()))
}
/// Gets a VarBuilder that applies some renaming function on tensor it gets queried for before
/// passing the new names to the inner VarBuilder.
///
/// ```rust
/// use candle::{Tensor, DType, Device};
///
/// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
/// let tensors: std::collections::HashMap<_, _> = [
/// ("foo".to_string(), a),
/// ]
/// .into_iter()
/// .collect();
/// let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, &Device::Cpu);
/// assert!(vb.contains_tensor("foo"));
/// assert!(vb.get((2, 3), "foo").is_ok());
/// assert!(!vb.contains_tensor("bar"));
/// let vb = vb.rename_f(|f: &str| if f == "bar" { "foo".to_string() } else { f.to_string() });
/// assert!(vb.contains_tensor("bar"));
/// assert!(vb.contains_tensor("foo"));
/// assert!(vb.get((2, 3), "bar").is_ok());
/// assert!(vb.get((2, 3), "foo").is_ok());
/// assert!(!vb.contains_tensor("baz"));
/// # Ok::<(), candle::Error>(())
/// ```
pub fn rename_f<F: Fn(&str) -> String + Sync + Send + 'static>(self, f: F) -> Self {
let f: Box<dyn Fn(&str) -> String + Sync + Send + 'static> = Box::new(f);
self.rename(f)
}
pub fn rename<R: Renamer + Send + Sync + 'a>(self, renamer: R) -> Self {
let dtype = self.dtype();
let device = self.device().clone();
let path = self.path.clone();
let backend = Rename::new(self, renamer);
let backend: Box<dyn SimpleBackend + 'a> = Box::new(backend);
let data = TensorData { backend, device };
Self {
data: Arc::new(data),
dtype,
path,
_phantom: std::marker::PhantomData,
}
}
}
pub struct ShardedSafeTensors(candle::safetensors::MmapedSafetensors);
pub type ShardedVarBuilder<'a> = VarBuilderArgs<'a, ShardedSafeTensors>;
impl ShardedSafeTensors {
/// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors
/// files and make them usable in a sharded way.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn var_builder<P: AsRef<std::path::Path>>(
paths: &[P],
dtype: DType,
dev: &Device,
) -> Result<ShardedVarBuilder<'static>> {
let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?;
let backend = ShardedSafeTensors(tensors);
Ok(VarBuilderArgs::new_with_args(backend, dtype, dev))
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Shard {
pub dim: usize,
pub rank: usize,
pub world_size: usize,
}
impl Default for Shard {
fn default() -> Self {
Self {
dim: 0,
rank: 0,
world_size: 1,
}
}
}
/// Get part of a tensor, typically used to do Tensor Parallelism sharding.
///
/// If the tensor is of size (1024, 1024).
///
/// `dim` corresponds to the dimension to slice into
/// `rank` is the rank of the current process
/// `world_size` is the total number of ranks in the process group
///
/// `get_sharded("tensor", 0, 0, 2)` means `tensor.i((..512))`
/// `get_sharded("tensor", 0, 1, 2)` means `tensor.i((512..))`
/// `get_sharded("tensor", 1, 0, 2)` means `tensor.i((.., ..512))`
impl Backend for ShardedSafeTensors {
type Hints = Shard;
fn get(
&self,
target_shape: Shape, // The size is only checked when the world size is 1.
path: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
if h.world_size == 1 {
// There is no sharding to be applied here so we use the default backend to speed
// things up.
return SimpleBackend::get(&self.0, target_shape, path, Default::default(), dtype, dev);
}
let Shard {
dim,
rank,
world_size,
} = h;
let view = self.0.get(path)?;
let view_dtype = view.dtype();
let mut shape = view.shape().to_vec();
let size = shape[dim];
if size % world_size != 0 {
return Err(Error::ShapeMismatchSplit {
shape: shape.into(),
dim,
n_parts: world_size,
});
}
let block_size = size / world_size;
let start = rank * block_size;
let stop = (rank + 1) * block_size;
// Everything is expressed in tensor dimension
// bytes offsets is handled automatically for safetensors.
let iterator = if dim == 0 {
view.slice(start..stop).map_err(|_| {
Error::Msg(format!(
"Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}"
))
})?
} else if dim == 1 {
view.slice((.., start..stop)).map_err(|_| {
Error::Msg(format!(
"Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}"
))
})?
} else {
candle::bail!("Get sharded on dimensions != 0 or 1")
};
shape[dim] = block_size;
let view_dtype: DType = view_dtype.try_into()?;
let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect();
Tensor::from_raw_buffer(&raw, view_dtype, &shape, dev)?.to_dtype(dtype)
}
fn contains_tensor(&self, name: &str) -> bool {
self.0.get(name).is_ok()
}
}
/// This trait specifies a way to rename the queried names into names that are stored in an inner
/// VarBuilder.
pub trait Renamer {
    /// This is applied to the queried name, and the resulting name is passed to the
    /// inner VarBuilder.
fn rename(&self, v: &str) -> std::borrow::Cow<'_, str>;
}
pub struct Rename<'a, R: Renamer> {
inner: VarBuilder<'a>,
renamer: R,
}
impl<R: Renamer + Sync + Send> SimpleBackend for Rename<'_, R> {
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let name = self.renamer.rename(name);
self.inner
.get_with_hints_dtype(s, &name, h, dtype)?
.to_device(dev)
}
fn contains_tensor(&self, name: &str) -> bool {
let name = self.renamer.rename(name);
self.inner.contains_tensor(&name)
}
}
impl<'a, R: Renamer> Rename<'a, R> {
pub fn new(inner: VarBuilder<'a>, renamer: R) -> Self {
Self { inner, renamer }
}
}
impl Renamer for Box<dyn Fn(&str) -> String + Sync + Send> {
fn rename(&self, v: &str) -> std::borrow::Cow<'_, str> {
std::borrow::Cow::Owned(self(v))
}
}
| candle/candle-nn/src/var_builder.rs/0 | {
"file_path": "candle/candle-nn/src/var_builder.rs",
"repo_id": "candle",
"token_count": 11054
} | 52 |
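A hedged usage sketch of the prefix mechanism above, assuming `candle` and `candle_nn` as dependencies: `pp` pushes a path segment, so a tensor stored under "encoder.layer0.weight" becomes reachable as "weight" after two `pp` calls.

use candle::{DType, Device, Tensor};
use std::collections::HashMap;

fn main() -> candle::Result<()> {
    let w = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    let tensors: HashMap<String, Tensor> =
        HashMap::from([("encoder.layer0.weight".to_string(), w)]);
    let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, &Device::Cpu);
    // `pp` is `push_prefix`: like `cd`-ing into a directory of tensor names.
    let vb = vb.pp("encoder").pp("layer0");
    assert!(vb.contains_tensor("weight"));
    let _w = vb.get((2, 3), "weight")?;
    Ok(())
}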
use candle::Result;
use prost::Message;
pub mod onnx {
include!(concat!(env!("OUT_DIR"), "/onnx.rs"));
}
pub mod eval;
pub use eval::{dtype, simple_eval};
pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> {
let buf = std::fs::read(p)?;
onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap)
}
| candle/candle-onnx/src/lib.rs/0 | {
"file_path": "candle/candle-onnx/src/lib.rs",
"repo_id": "candle",
"token_count": 154
} | 53 |
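A hedged usage sketch of this crate's entry points, assuming a `model.onnx` file whose graph takes a single input named "input"; both the path and the input name are placeholders:

use std::collections::HashMap;

fn main() -> candle::Result<()> {
    let model = candle_onnx::read_file("model.onnx")?;
    let input = candle::Tensor::zeros((1, 3), candle::DType::F32, &candle::Device::Cpu)?;
    let inputs: HashMap<String, candle::Tensor> =
        HashMap::from([("input".to_string(), input)]);
    // Evaluate the graph on the CPU and print the output shapes.
    let outputs = candle_onnx::simple_eval(&model, inputs)?;
    for (name, t) in outputs.iter() {
        println!("{name}: {:?}", t.shape());
    }
    Ok(())
}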
from .module import Module
from .container import Sequential, ModuleList, ModuleDict
from .sparse import Embedding
from .normalization import LayerNorm
from .linear import Linear
| candle/candle-pyo3/py_src/candle/nn/__init__.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/nn/__init__.py",
"repo_id": "candle",
"token_count": 43
} | 54 |
use std::collections::HashMap;
use crate::utils::wrap_err;
use crate::{PyDType, PyTensor};
use candle_onnx::eval::{dtype, get_tensor, simple_eval};
use candle_onnx::onnx::tensor_proto::DataType;
use candle_onnx::onnx::tensor_shape_proto::dimension::Value;
use candle_onnx::onnx::type_proto::{Tensor as ONNXTensor, Value as ONNXValue};
use candle_onnx::onnx::{ModelProto, ValueInfoProto};
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyList, PyTuple};
#[derive(Clone, Debug)]
#[pyclass(name = "ONNXTensorDescription")]
/// A wrapper around an ONNX tensor description.
pub struct PyONNXTensorDescriptor(ONNXTensor);
#[pymethods]
impl PyONNXTensorDescriptor {
#[getter]
/// The data type of the tensor.
/// &RETURNS&: DType
fn dtype(&self) -> PyResult<PyDType> {
match DataType::try_from(self.0.elem_type) {
Ok(dt) => match dtype(dt) {
Some(dt) => Ok(PyDType(dt)),
None => Err(PyValueError::new_err(format!(
"unsupported 'value' data-type {dt:?}"
))),
},
type_ => Err(PyValueError::new_err(format!(
"unsupported input type {type_:?}"
))),
}
}
#[getter]
/// The shape of the tensor.
/// &RETURNS&: Tuple[Union[int,str,Any]]
fn shape(&self, py: Python) -> PyResult<Py<PyTuple>> {
let shape = PyList::empty_bound(py);
if let Some(d) = &self.0.shape {
for dim in d.dim.iter() {
if let Some(value) = &dim.value {
match value {
Value::DimValue(v) => shape.append(*v)?,
Value::DimParam(s) => shape.append(s.clone())?,
};
} else {
return Err(PyValueError::new_err("None value in shape"));
}
}
}
Ok(shape.to_tuple().into())
}
fn __repr__(&self, py: Python) -> String {
match (self.shape(py), self.dtype()) {
(Ok(shape), Ok(dtype)) => format!(
"TensorDescriptor[shape: {:?}, dtype: {:?}]",
shape.to_string(),
dtype.__str__()
),
(Err(_), Err(_)) => "TensorDescriptor[shape: unknown, dtype: unknown]".to_string(),
(Err(_), Ok(dtype)) => format!(
"TensorDescriptor[shape: unknown, dtype: {:?}]",
dtype.__str__()
),
(Ok(shape), Err(_)) => format!(
"TensorDescriptor[shape: {:?}, dtype: unknown]",
shape.to_string()
),
}
}
fn __str__(&self, py: Python) -> String {
self.__repr__(py)
}
}
#[derive(Clone, Debug)]
#[pyclass(name = "ONNXModel")]
/// A wrapper around an ONNX model.
pub struct PyONNXModel(ModelProto);
fn extract_tensor_descriptions(
value_infos: &[ValueInfoProto],
) -> HashMap<String, PyONNXTensorDescriptor> {
let mut map = HashMap::new();
for value_info in value_infos.iter() {
let input_type = match &value_info.r#type {
Some(input_type) => input_type,
None => continue,
};
let input_type = match &input_type.value {
Some(input_type) => input_type,
None => continue,
};
let tensor_type: &ONNXTensor = match input_type {
ONNXValue::TensorType(tt) => tt,
_ => continue,
};
map.insert(
value_info.name.to_string(),
PyONNXTensorDescriptor(tensor_type.clone()),
);
}
map
}
#[pymethods]
impl PyONNXModel {
#[new]
#[pyo3(text_signature = "(self, path:str)")]
/// Load an ONNX model from the given path.
fn new(path: String) -> PyResult<Self> {
let model: ModelProto = candle_onnx::read_file(path).map_err(wrap_err)?;
Ok(PyONNXModel(model))
}
#[getter]
/// The version of the IR this model targets.
/// &RETURNS&: int
fn ir_version(&self) -> i64 {
self.0.ir_version
}
#[getter]
/// The producer of the model.
/// &RETURNS&: str
fn producer_name(&self) -> String {
self.0.producer_name.clone()
}
#[getter]
/// The version of the producer of the model.
/// &RETURNS&: str
fn producer_version(&self) -> String {
self.0.producer_version.clone()
}
#[getter]
/// The domain of the operator set of the model.
/// &RETURNS&: str
fn domain(&self) -> String {
self.0.domain.clone()
}
#[getter]
/// The version of the model.
/// &RETURNS&: int
fn model_version(&self) -> i64 {
self.0.model_version
}
#[getter]
/// The doc string of the model.
/// &RETURNS&: str
fn doc_string(&self) -> String {
self.0.doc_string.clone()
}
/// Get the weights of the model.
/// &RETURNS&: Dict[str, Tensor]
fn initializers(&self) -> PyResult<HashMap<String, PyTensor>> {
let mut map = HashMap::new();
if let Some(graph) = self.0.graph.as_ref() {
for tensor_description in graph.initializer.iter() {
let tensor = get_tensor(tensor_description, tensor_description.name.as_str())
.map_err(wrap_err)?;
map.insert(tensor_description.name.to_string(), PyTensor(tensor));
}
}
Ok(map)
}
#[getter]
/// The inputs of the model.
/// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
fn inputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
if let Some(graph) = self.0.graph.as_ref() {
return Some(extract_tensor_descriptions(&graph.input));
}
None
}
#[getter]
/// The outputs of the model.
/// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
fn outputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
if let Some(graph) = self.0.graph.as_ref() {
return Some(extract_tensor_descriptions(&graph.output));
}
None
}
#[pyo3(text_signature = "(self, inputs:Dict[str,Tensor])")]
/// Run the model on the given inputs.
/// &RETURNS&: Dict[str,Tensor]
fn run(&self, inputs: HashMap<String, PyTensor>) -> PyResult<HashMap<String, PyTensor>> {
let unwrapped_tensors = inputs.into_iter().map(|(k, v)| (k, v.0)).collect();
let result = simple_eval(&self.0, unwrapped_tensors).map_err(wrap_err)?;
Ok(result
.into_iter()
.map(|(k, v)| (k, PyTensor(v)))
.collect())
}
}
| candle/candle-pyo3/src/onnx.rs/0 | {
"file_path": "candle/candle-pyo3/src/onnx.rs",
"repo_id": "candle",
"token_count": 3268
} | 55 |
pub mod generation;
pub mod models;
pub mod object_detection;
pub mod pipelines;
pub mod quantized_nn;
pub mod quantized_var_builder;
pub mod utils;
| candle/candle-transformers/src/lib.rs/0 | {
"file_path": "candle/candle-transformers/src/lib.rs",
"repo_id": "candle",
"token_count": 47
} | 56 |
//! ConvMixer implementation.
//!
//! See "Patches Are All You Need?" by Trockman et al. 2022
//!
//! - 📝 [Arxiv](https://arxiv.org/abs/2201.09792)
//! - 💻 [Github](https://github.com/locuslab/convmixer)
//!
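//! A minimal usage sketch (assumes `vb` already holds pretrained ConvMixer weights and
//! `xs` is an image batch of shape `(batch, 3, height, width)`; names are illustrative):
//! ```ignore
//! let model = c1536_20(1000, vb)?;
//! let logits = xs.apply(&model)?; // (batch, 1000)
//! ```
//!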
use candle::Result;
use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder};
#[allow(clippy::many_single_char_names)]
fn conv2d_same(
i: usize,
o: usize,
k: usize,
c: Conv2dConfig,
vb: VarBuilder,
) -> Result<impl Module> {
let conv2d = candle_nn::conv2d(i, o, k, c, vb)?;
let s = c.stride;
let module = candle_nn::func(move |xs| {
let ih = xs.dim(2)?;
let iw = xs.dim(3)?;
let oh = ih.div_ceil(s);
let ow = iw.div_ceil(s);
let pad_h = usize::max((oh - 1) * s + k - ih, 0);
let pad_w = usize::max((ow - 1) * s + k - iw, 0);
if pad_h > 0 || pad_w > 0 {
xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?
.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?
.apply(&conv2d)
} else {
xs.apply(&conv2d)
}
});
Ok(module)
}
fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> {
let conv2d_cfg = Conv2dConfig {
groups: dim,
..Default::default()
};
let vb_fn = vb.pp(0).pp("fn");
let conv1 = conv2d_same(dim, dim, kernel_size, conv2d_cfg, vb_fn.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb_fn.pp(2))?;
let conv2 = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?;
let bn2 = batch_norm(dim, 1e-5, vb.pp(3))?;
Ok(candle_nn::func(move |xs| {
let ys = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
(xs + ys)?.apply(&conv2)?.gelu_erf()?.apply_t(&bn2, false)
}))
}
fn convmixer(
nclasses: usize,
dim: usize,
depth: usize,
kernel_size: usize,
patch_size: usize,
vb: VarBuilder,
) -> Result<candle_nn::Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: patch_size,
..Default::default()
};
let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?;
let blocks: Vec<_> = (0..depth)
.map(|index| block(dim, kernel_size, vb.pp(3 + index)))
.collect::<Result<Vec<_>>>()?;
let fc = candle_nn::linear(dim, nclasses, vb.pp(25))?;
Ok(candle_nn::func(move |xs| {
let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
for block in blocks.iter() {
xs = xs.apply(block)?
}
// This performs the adaptive average pooling with a target size of (1, 1).
xs.mean(3)?.mean(2)?.apply(&fc)
}))
}
pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1536, 20, 9, 7, vb)
}
pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1024, 20, 9, 14, vb)
}
| candle/candle-transformers/src/models/convmixer.rs/0 | {
"file_path": "candle/candle-transformers/src/models/convmixer.rs",
"repo_id": "candle",
"token_count": 1504
} | 57 |
use candle::{Result, Tensor, D};
use candle_nn::{conv2d, group_norm, Conv2d, GroupNorm, VarBuilder};
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/modules/autoencoder.py#L9
#[derive(Debug, Clone)]
pub struct Config {
pub resolution: usize,
pub in_channels: usize,
pub ch: usize,
pub out_ch: usize,
pub ch_mult: Vec<usize>,
pub num_res_blocks: usize,
pub z_channels: usize,
pub scale_factor: f64,
pub shift_factor: f64,
}
impl Config {
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L47
pub fn dev() -> Self {
Self {
resolution: 256,
in_channels: 3,
ch: 128,
out_ch: 3,
ch_mult: vec![1, 2, 4, 4],
num_res_blocks: 2,
z_channels: 16,
scale_factor: 0.3611,
shift_factor: 0.1159,
}
}
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L79
pub fn schnell() -> Self {
Self {
resolution: 256,
in_channels: 3,
ch: 128,
out_ch: 3,
ch_mult: vec![1, 2, 4, 4],
num_res_blocks: 2,
z_channels: 16,
scale_factor: 0.3611,
shift_factor: 0.1159,
}
}
}
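// Standard scaled dot-product attention: softmax(q @ k^T / sqrt(d)) @ v.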
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
#[derive(Debug, Clone)]
struct AttnBlock {
q: Conv2d,
k: Conv2d,
v: Conv2d,
proj_out: Conv2d,
norm: GroupNorm,
}
impl AttnBlock {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let q = conv2d(in_c, in_c, 1, Default::default(), vb.pp("q"))?;
let k = conv2d(in_c, in_c, 1, Default::default(), vb.pp("k"))?;
let v = conv2d(in_c, in_c, 1, Default::default(), vb.pp("v"))?;
let proj_out = conv2d(in_c, in_c, 1, Default::default(), vb.pp("proj_out"))?;
let norm = group_norm(32, in_c, 1e-6, vb.pp("norm"))?;
Ok(Self {
q,
k,
v,
proj_out,
norm,
})
}
}
impl candle::Module for AttnBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let init_xs = xs;
let xs = xs.apply(&self.norm)?;
let q = xs.apply(&self.q)?;
let k = xs.apply(&self.k)?;
let v = xs.apply(&self.v)?;
let (b, c, h, w) = q.dims4()?;
let q = q.flatten_from(2)?.t()?.unsqueeze(1)?;
let k = k.flatten_from(2)?.t()?.unsqueeze(1)?;
let v = v.flatten_from(2)?.t()?.unsqueeze(1)?;
let xs = scaled_dot_product_attention(&q, &k, &v)?;
let xs = xs.squeeze(1)?.t()?.reshape((b, c, h, w))?;
xs.apply(&self.proj_out)? + init_xs
}
}
#[derive(Debug, Clone)]
struct ResnetBlock {
norm1: GroupNorm,
conv1: Conv2d,
norm2: GroupNorm,
conv2: Conv2d,
nin_shortcut: Option<Conv2d>,
}
impl ResnetBlock {
fn new(in_c: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let norm1 = group_norm(32, in_c, 1e-6, vb.pp("norm1"))?;
let conv1 = conv2d(in_c, out_c, 3, conv_cfg, vb.pp("conv1"))?;
let norm2 = group_norm(32, out_c, 1e-6, vb.pp("norm2"))?;
let conv2 = conv2d(out_c, out_c, 3, conv_cfg, vb.pp("conv2"))?;
let nin_shortcut = if in_c == out_c {
None
} else {
Some(conv2d(
in_c,
out_c,
1,
Default::default(),
vb.pp("nin_shortcut"),
)?)
};
Ok(Self {
norm1,
conv1,
norm2,
conv2,
nin_shortcut,
})
}
}
impl candle::Module for ResnetBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let h = xs
.apply(&self.norm1)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv1)?
.apply(&self.norm2)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv2)?;
match self.nin_shortcut.as_ref() {
None => xs + h,
Some(c) => xs.apply(c)? + h,
}
}
}
#[derive(Debug, Clone)]
struct Downsample {
conv: Conv2d,
}
impl Downsample {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl candle::Module for Downsample {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.pad_with_zeros(D::Minus1, 0, 1)?;
let xs = xs.pad_with_zeros(D::Minus2, 0, 1)?;
xs.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct Upsample {
conv: Conv2d,
}
impl Upsample {
fn new(in_c: usize, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv = conv2d(in_c, in_c, 3, conv_cfg, vb.pp("conv"))?;
Ok(Self { conv })
}
}
impl candle::Module for Upsample {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_, _, h, w) = xs.dims4()?;
xs.upsample_nearest2d(h * 2, w * 2)?.apply(&self.conv)
}
}
#[derive(Debug, Clone)]
struct DownBlock {
block: Vec<ResnetBlock>,
downsample: Option<Downsample>,
}
#[derive(Debug, Clone)]
pub struct Encoder {
conv_in: Conv2d,
mid_block_1: ResnetBlock,
mid_attn_1: AttnBlock,
mid_block_2: ResnetBlock,
norm_out: GroupNorm,
conv_out: Conv2d,
down: Vec<DownBlock>,
}
impl Encoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let mut block_in = cfg.ch;
let conv_in = conv2d(cfg.in_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?;
let mut down = Vec::with_capacity(cfg.ch_mult.len());
let vb_d = vb.pp("down");
for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate() {
let mut block = Vec::with_capacity(cfg.num_res_blocks);
let vb_d = vb_d.pp(i_level);
let vb_b = vb_d.pp("block");
let in_ch_mult = if i_level == 0 {
1
} else {
cfg.ch_mult[i_level - 1]
};
block_in = cfg.ch * in_ch_mult;
let block_out = cfg.ch * ch_mult;
for i_block in 0..cfg.num_res_blocks {
let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?;
block.push(b);
block_in = block_out;
}
let downsample = if i_level != cfg.ch_mult.len() - 1 {
Some(Downsample::new(block_in, vb_d.pp("downsample"))?)
} else {
None
};
let block = DownBlock { block, downsample };
down.push(block)
}
let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?;
let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?;
let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?;
let conv_out = conv2d(block_in, 2 * cfg.z_channels, 3, conv_cfg, vb.pp("conv_out"))?;
let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?;
Ok(Self {
conv_in,
mid_block_1,
mid_attn_1,
mid_block_2,
norm_out,
conv_out,
down,
})
}
}
impl candle_nn::Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut h = xs.apply(&self.conv_in)?;
for block in self.down.iter() {
for b in block.block.iter() {
h = h.apply(b)?
}
if let Some(ds) = block.downsample.as_ref() {
h = h.apply(ds)?
}
}
h.apply(&self.mid_block_1)?
.apply(&self.mid_attn_1)?
.apply(&self.mid_block_2)?
.apply(&self.norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
#[derive(Debug, Clone)]
struct UpBlock {
block: Vec<ResnetBlock>,
upsample: Option<Upsample>,
}
#[derive(Debug, Clone)]
pub struct Decoder {
conv_in: Conv2d,
mid_block_1: ResnetBlock,
mid_attn_1: AttnBlock,
mid_block_2: ResnetBlock,
norm_out: GroupNorm,
conv_out: Conv2d,
up: Vec<UpBlock>,
}
impl Decoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let conv_cfg = candle_nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let mut block_in = cfg.ch * cfg.ch_mult.last().unwrap_or(&1);
let conv_in = conv2d(cfg.z_channels, block_in, 3, conv_cfg, vb.pp("conv_in"))?;
let mid_block_1 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_1"))?;
let mid_attn_1 = AttnBlock::new(block_in, vb.pp("mid.attn_1"))?;
let mid_block_2 = ResnetBlock::new(block_in, block_in, vb.pp("mid.block_2"))?;
let mut up = Vec::with_capacity(cfg.ch_mult.len());
let vb_u = vb.pp("up");
for (i_level, ch_mult) in cfg.ch_mult.iter().enumerate().rev() {
let block_out = cfg.ch * ch_mult;
let vb_u = vb_u.pp(i_level);
let vb_b = vb_u.pp("block");
let mut block = Vec::with_capacity(cfg.num_res_blocks + 1);
for i_block in 0..=cfg.num_res_blocks {
let b = ResnetBlock::new(block_in, block_out, vb_b.pp(i_block))?;
block.push(b);
block_in = block_out;
}
let upsample = if i_level != 0 {
Some(Upsample::new(block_in, vb_u.pp("upsample"))?)
} else {
None
};
let block = UpBlock { block, upsample };
up.push(block)
}
up.reverse();
let norm_out = group_norm(32, block_in, 1e-6, vb.pp("norm_out"))?;
let conv_out = conv2d(block_in, cfg.out_ch, 3, conv_cfg, vb.pp("conv_out"))?;
Ok(Self {
conv_in,
mid_block_1,
mid_attn_1,
mid_block_2,
norm_out,
conv_out,
up,
})
}
}
impl candle_nn::Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let h = xs.apply(&self.conv_in)?;
let mut h = h
.apply(&self.mid_block_1)?
.apply(&self.mid_attn_1)?
.apply(&self.mid_block_2)?;
for block in self.up.iter().rev() {
for b in block.block.iter() {
h = h.apply(b)?
}
if let Some(us) = block.upsample.as_ref() {
h = h.apply(us)?
}
}
h.apply(&self.norm_out)?
.apply(&candle_nn::Activation::Swish)?
.apply(&self.conv_out)
}
}
#[derive(Debug, Clone)]
pub struct DiagonalGaussian {
sample: bool,
chunk_dim: usize,
}
impl DiagonalGaussian {
pub fn new(sample: bool, chunk_dim: usize) -> Result<Self> {
Ok(Self { sample, chunk_dim })
}
}
impl candle_nn::Module for DiagonalGaussian {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
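// Split the input into (mean, logvar) halves along `chunk_dim`. When sampling, apply
// the reparameterization trick: mean + exp(0.5 * logvar) * eps, with eps ~ N(0, 1).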
let chunks = xs.chunk(2, self.chunk_dim)?;
if self.sample {
let std = (&chunks[1] * 0.5)?.exp()?;
&chunks[0] + (std * chunks[0].randn_like(0., 1.))?
} else {
Ok(chunks[0].clone())
}
}
}
#[derive(Debug, Clone)]
pub struct AutoEncoder {
encoder: Encoder,
decoder: Decoder,
reg: DiagonalGaussian,
shift_factor: f64,
scale_factor: f64,
}
impl AutoEncoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let decoder = Decoder::new(cfg, vb.pp("decoder"))?;
let reg = DiagonalGaussian::new(true, 1)?;
Ok(Self {
encoder,
decoder,
reg,
scale_factor: cfg.scale_factor,
shift_factor: cfg.shift_factor,
})
}
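/// Encode an image into the latent space. The raw latent is mapped affinely as
/// `(z - shift_factor) * scale_factor`; `decode` applies the inverse mapping.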
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
let z = xs.apply(&self.encoder)?.apply(&self.reg)?;
(z - self.shift_factor)? * self.scale_factor
}
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = ((xs / self.scale_factor)? + self.shift_factor)?;
xs.apply(&self.decoder)
}
}
impl candle::Module for AutoEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.decode(&self.encode(xs)?)
}
}
| candle/candle-transformers/src/models/flux/autoencoder.rs/0 | {
"file_path": "candle/candle-transformers/src/models/flux/autoencoder.rs",
"repo_id": "candle",
"token_count": 7145
} | 58 |
//! Llama2 inference implementation.
//!
//! See ["LLaMA 2: Open Foundation and Fine-Tuned Chat Models"](https://arxiv.org/abs/2307.09288)
//!
//! Based on the [llama2.c](https://github.com/karpathy/llama2.c) implementation
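//!
//! A minimal loading sketch (the checkpoint name is illustrative; the llama2.c binary
//! format is a flat integer header followed by little-endian f32 weights):
//! ```ignore
//! let mut file = std::fs::File::open("stories15M.bin")?;
//! let config = Config::from_reader(&mut file)?;
//! let weights = TransformerWeights::from_reader(&mut file, &config, &Device::Cpu)?;
//! let vb = weights.var_builder(&config, &Device::Cpu)?;
//! ```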
use byteorder::{LittleEndian, ReadBytesExt};
use candle::{DType, Device, IndexOp, Result, Shape, Tensor};
use candle_nn::VarBuilder;
use super::llama2_c::Config;
pub struct TransformerWeights {
// token embedding table
token_embedding_table: Tensor, // (vocab_size, dim)
// weights for rmsnorms
rms_att_weight: Tensor, // (layer, dim) rmsnorm weights
rms_ffn_weight: Tensor, // (layer, dim)
// weights for matmuls
wq: Tensor, // (layer, dim, dim)
wk: Tensor, // (layer, dim, dim)
wv: Tensor, // (layer, dim, dim)
wo: Tensor, // (layer, dim, dim)
// weights for ffn
w1: Tensor, // (layer, hidden_dim, dim)
w2: Tensor, // (layer, dim, hidden_dim)
w3: Tensor, // (layer, hidden_dim, dim)
// final rmsnorm
rms_final_weight: Tensor, // (dim,)
// freq_cis for RoPE relatively positional embeddings
freq_cis_real: Tensor, // (seq_len, head_size/2)
freq_cis_imag: Tensor, // (seq_len, head_size/2)
}
fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> {
let mut buf = [0u8; 4];
r.read_exact(&mut buf)?;
Ok(i32::from_le_bytes(buf))
}
fn read_tensor<R: std::io::Read, S: Into<Shape>>(
r: &mut R,
shape: S,
dev: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let mut data_t = vec![0f32; shape.elem_count()];
r.read_f32_into::<LittleEndian>(&mut data_t)?;
let tensor = Tensor::from_vec(data_t, shape, dev)?;
Ok(tensor)
}
impl Config {
pub fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> {
let dim = read_i32(r)? as usize;
let hidden_dim = read_i32(r)? as usize;
let n_layers = read_i32(r)? as usize;
let n_heads = read_i32(r)? as usize;
let n_kv_heads = read_i32(r)? as usize;
let vocab_size = read_i32(r)? as usize;
let seq_len = read_i32(r)? as usize;
Ok(Self {
dim,
hidden_dim,
n_layers,
n_heads,
n_kv_heads,
vocab_size,
seq_len,
norm_eps: 1e-5,
})
}
pub fn head_size(&self) -> usize {
self.dim / self.n_heads
}
}
impl TransformerWeights {
pub fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> {
let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?;
let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?;
let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?;
let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?;
let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?;
let rms_final_weight = read_tensor(r, c.dim, dev)?;
let head_size = c.head_size();
let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?;
Ok(Self {
token_embedding_table,
rms_att_weight,
wq,
wk,
wv,
wo,
rms_ffn_weight,
w1,
w2,
w3,
rms_final_weight,
freq_cis_real,
freq_cis_imag,
})
}
pub fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder<'static>> {
// TODO: As of 2023-08-04, gemm is slower than expected when multiplying a matrix of
// size (1, k) with the transpose of a matrix of size (k, n) as it ends up transposing the
// second matrix back. We detect this case here and as a temporary hack make the weight
// matrix column major rather than row major. This ends up speeding up text generation from
// 120 token/s to 220 token/s on a Ryzen 2600X.
let tr = device.is_cpu() && !candle::utils::has_mkl();
let tr = |x: Tensor| if tr { x.t()?.contiguous()?.t() } else { Ok(x) };
let mut ws = std::collections::HashMap::new();
let mut insert = |name: &str, t: Tensor| {
ws.insert(name.to_string(), t);
};
insert("rot.freq_cis_real", self.freq_cis_real.clone());
insert("rot.freq_cis_imag", self.freq_cis_imag.clone());
insert(
"model.embed_tokens.weight",
self.token_embedding_table.clone(),
);
insert("lm_head.weight", tr(self.token_embedding_table.clone())?);
insert("model.norm.weight", self.rms_final_weight.clone());
for layer in 0..cfg.n_layers {
ws.insert(
format!("model.layers.{layer}.self_attn.q_proj.weight"),
tr(self.wq.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.k_proj.weight"),
tr(self.wk.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.v_proj.weight"),
tr(self.wv.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.self_attn.o_proj.weight"),
tr(self.wo.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.gate_proj.weight"),
tr(self.w1.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.down_proj.weight"),
tr(self.w2.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.mlp.up_proj.weight"),
tr(self.w3.i(layer)?)?,
);
ws.insert(
format!("model.layers.{layer}.input_layernorm.weight"),
self.rms_att_weight.i(layer)?,
);
ws.insert(
format!("model.layers.{layer}.post_attention_layernorm.weight"),
self.rms_ffn_weight.i(layer)?,
);
}
let vb = VarBuilder::from_tensors(ws, DType::F32, device);
Ok(vb)
}
}
| candle/candle-transformers/src/models/llama2_c_weights.rs/0 | {
"file_path": "candle/candle-transformers/src/models/llama2_c_weights.rs",
"repo_id": "candle",
"token_count": 3405
} | 59 |
use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
use super::projections::{AttnProjections, Mlp, Qkv, QkvOnlyAttnProjections};
pub struct ModulateIntermediates {
gate_msa: Tensor,
shift_mlp: Tensor,
scale_mlp: Tensor,
gate_mlp: Tensor,
}
pub struct DiTBlock {
norm1: LayerNormNoAffine,
attn: AttnProjections,
norm2: LayerNormNoAffine,
mlp: Mlp,
ada_ln_modulation: nn::Sequential,
}
pub struct LayerNormNoAffine {
eps: f64,
}
impl LayerNormNoAffine {
pub fn new(eps: f64) -> Self {
Self { eps }
}
}
impl Module for LayerNormNoAffine {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
nn::LayerNorm::new_no_bias(Tensor::ones_like(x)?, self.eps).forward(x)
}
}
impl DiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = AttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let norm2 = LayerNormNoAffine::new(1e-6);
let mlp_ratio = 4;
let mlp = Mlp::new(hidden_size, hidden_size * mlp_ratio, vb.pp("mlp"))?;
let n_mods = 6;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
norm2,
mlp,
ada_ln_modulation,
})
}
pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<(Qkv, ModulateIntermediates)> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(6, D::Minus1)?;
let (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp) = (
chunks[0].clone(),
chunks[1].clone(),
chunks[2].clone(),
chunks[3].clone(),
chunks[4].clone(),
chunks[5].clone(),
);
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
let qkv = self.attn.pre_attention(&modulated_x)?;
Ok((
qkv,
ModulateIntermediates {
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
},
))
}
pub fn post_attention(
&self,
attn: &Tensor,
x: &Tensor,
mod_interm: &ModulateIntermediates,
) -> Result<Tensor> {
let attn_out = self.attn.post_attention(attn)?;
let x = x.add(&attn_out.broadcast_mul(&mod_interm.gate_msa.unsqueeze(1)?)?)?;
let norm_x = self.norm2.forward(&x)?;
let modulated_x = modulate(&norm_x, &mod_interm.shift_mlp, &mod_interm.scale_mlp)?;
let mlp_out = self.mlp.forward(&modulated_x)?;
let x = x.add(&mlp_out.broadcast_mul(&mod_interm.gate_mlp.unsqueeze(1)?)?)?;
Ok(x)
}
}
pub struct SelfAttnModulateIntermediates {
gate_msa: Tensor,
shift_mlp: Tensor,
scale_mlp: Tensor,
gate_mlp: Tensor,
gate_msa2: Tensor,
}
pub struct SelfAttnDiTBlock {
norm1: LayerNormNoAffine,
attn: AttnProjections,
attn2: AttnProjections,
norm2: LayerNormNoAffine,
mlp: Mlp,
ada_ln_modulation: nn::Sequential,
}
impl SelfAttnDiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = AttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let attn2 = AttnProjections::new(hidden_size, num_heads, vb.pp("attn2"))?;
let norm2 = LayerNormNoAffine::new(1e-6);
let mlp_ratio = 4;
let mlp = Mlp::new(hidden_size, hidden_size * mlp_ratio, vb.pp("mlp"))?;
let n_mods = 9;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
attn2,
norm2,
mlp,
ada_ln_modulation,
})
}
pub fn pre_attention(
&self,
x: &Tensor,
c: &Tensor,
) -> Result<(Qkv, Qkv, SelfAttnModulateIntermediates)> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(9, D::Minus1)?;
let (
shift_msa,
scale_msa,
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
shift_msa2,
scale_msa2,
gate_msa2,
) = (
chunks[0].clone(),
chunks[1].clone(),
chunks[2].clone(),
chunks[3].clone(),
chunks[4].clone(),
chunks[5].clone(),
chunks[6].clone(),
chunks[7].clone(),
chunks[8].clone(),
);
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
let qkv = self.attn.pre_attention(&modulated_x)?;
let modulated_x2 = modulate(&norm_x, &shift_msa2, &scale_msa2)?;
let qkv2 = self.attn2.pre_attention(&modulated_x2)?;
Ok((
qkv,
qkv2,
SelfAttnModulateIntermediates {
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
gate_msa2,
},
))
}
pub fn post_attention(
&self,
attn: &Tensor,
attn2: &Tensor,
x: &Tensor,
mod_interm: &SelfAttnModulateIntermediates,
) -> Result<Tensor> {
let attn_out = self.attn.post_attention(attn)?;
let x = x.add(&attn_out.broadcast_mul(&mod_interm.gate_msa.unsqueeze(1)?)?)?;
let attn_out2 = self.attn2.post_attention(attn2)?;
let x = x.add(&attn_out2.broadcast_mul(&mod_interm.gate_msa2.unsqueeze(1)?)?)?;
let norm_x = self.norm2.forward(&x)?;
let modulated_x = modulate(&norm_x, &mod_interm.shift_mlp, &mod_interm.scale_mlp)?;
let mlp_out = self.mlp.forward(&modulated_x)?;
let x = x.add(&mlp_out.broadcast_mul(&mod_interm.gate_mlp.unsqueeze(1)?)?)?;
Ok(x)
}
}
pub struct QkvOnlyDiTBlock {
norm1: LayerNormNoAffine,
attn: QkvOnlyAttnProjections,
ada_ln_modulation: nn::Sequential,
}
impl QkvOnlyDiTBlock {
pub fn new(hidden_size: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoAffine::new(1e-6);
let attn = QkvOnlyAttnProjections::new(hidden_size, num_heads, vb.pp("attn"))?;
let n_mods = 2;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
n_mods * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm1,
attn,
ada_ln_modulation,
})
}
pub fn pre_attention(&self, x: &Tensor, c: &Tensor) -> Result<Qkv> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(2, D::Minus1)?;
let (shift_msa, scale_msa) = (chunks[0].clone(), chunks[1].clone());
let norm_x = self.norm1.forward(x)?;
let modulated_x = modulate(&norm_x, &shift_msa, &scale_msa)?;
self.attn.pre_attention(&modulated_x)
}
}
pub struct FinalLayer {
norm_final: LayerNormNoAffine,
linear: nn::Linear,
ada_ln_modulation: nn::Sequential,
}
impl FinalLayer {
pub fn new(
hidden_size: usize,
patch_size: usize,
out_channels: usize,
vb: nn::VarBuilder,
) -> Result<Self> {
let norm_final = LayerNormNoAffine::new(1e-6);
let linear = nn::linear(
hidden_size,
patch_size * patch_size * out_channels,
vb.pp("linear"),
)?;
let ada_ln_modulation = nn::seq().add(nn::Activation::Silu).add(nn::linear(
hidden_size,
2 * hidden_size,
vb.pp("adaLN_modulation.1"),
)?);
Ok(Self {
norm_final,
linear,
ada_ln_modulation,
})
}
pub fn forward(&self, x: &Tensor, c: &Tensor) -> Result<Tensor> {
let modulation = self.ada_ln_modulation.forward(c)?;
let chunks = modulation.chunk(2, D::Minus1)?;
let (shift, scale) = (chunks[0].clone(), chunks[1].clone());
let norm_x = self.norm_final.forward(x)?;
let modulated_x = modulate(&norm_x, &shift, &scale)?;
let output = self.linear.forward(&modulated_x)?;
Ok(output)
}
}
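// AdaLN modulation: shift + x * (1 + scale), with shift and scale broadcast over the
// sequence dimension.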
fn modulate(x: &Tensor, shift: &Tensor, scale: &Tensor) -> Result<Tensor> {
let shift = shift.unsqueeze(1)?;
let scale = scale.unsqueeze(1)?;
let scale_plus_one = scale.add(&Tensor::ones_like(&scale)?)?;
shift.broadcast_add(&x.broadcast_mul(&scale_plus_one)?)
}
pub trait JointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)>;
}
pub struct MMDiTJointBlock {
x_block: DiTBlock,
context_block: DiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl MMDiTJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = DiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
}
impl JointBlock for MMDiTJointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)> {
let (context_qkv, context_interm) = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?;
let (context_attn, x_attn) =
joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let context_out =
self.context_block
.post_attention(&context_attn, context, &context_interm)?;
let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?;
Ok((context_out, x_out))
}
}
pub struct MMDiTXJointBlock {
x_block: SelfAttnDiTBlock,
context_block: DiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl MMDiTXJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = SelfAttnDiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = DiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
}
impl JointBlock for MMDiTXJointBlock {
fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<(Tensor, Tensor)> {
let (context_qkv, context_interm) = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_qkv2, x_interm) = self.x_block.pre_attention(x, c)?;
let (context_attn, x_attn) =
joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let x_attn2 = attn(&x_qkv2, self.num_heads, self.use_flash_attn)?;
let context_out =
self.context_block
.post_attention(&context_attn, context, &context_interm)?;
let x_out = self
.x_block
.post_attention(&x_attn, &x_attn2, x, &x_interm)?;
Ok((context_out, x_out))
}
}
pub struct ContextQkvOnlyJointBlock {
x_block: DiTBlock,
context_block: QkvOnlyDiTBlock,
num_heads: usize,
use_flash_attn: bool,
}
impl ContextQkvOnlyJointBlock {
pub fn new(
hidden_size: usize,
num_heads: usize,
use_flash_attn: bool,
vb: nn::VarBuilder,
) -> Result<Self> {
let x_block = DiTBlock::new(hidden_size, num_heads, vb.pp("x_block"))?;
let context_block = QkvOnlyDiTBlock::new(hidden_size, num_heads, vb.pp("context_block"))?;
Ok(Self {
x_block,
context_block,
num_heads,
use_flash_attn,
})
}
pub fn forward(&self, context: &Tensor, x: &Tensor, c: &Tensor) -> Result<Tensor> {
let context_qkv = self.context_block.pre_attention(context, c)?;
let (x_qkv, x_interm) = self.x_block.pre_attention(x, c)?;
let (_, x_attn) = joint_attn(&context_qkv, &x_qkv, self.num_heads, self.use_flash_attn)?;
let x_out = self.x_block.post_attention(&x_attn, x, &x_interm)?;
Ok(x_out)
}
}
// A QKV-attention that is compatible with the interface of candle_flash_attn::flash_attn
// Flash attention regards q, k, v dimensions as (batch_size, seqlen, nheads, headdim)
fn flash_compatible_attention(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
) -> Result<Tensor> {
let q_dims_for_matmul = q.transpose(1, 2)?.dims().to_vec();
let rank = q_dims_for_matmul.len();
let q = q.transpose(1, 2)?.flatten_to(rank - 3)?;
let k = k.transpose(1, 2)?.flatten_to(rank - 3)?;
let v = v.transpose(1, 2)?.flatten_to(rank - 3)?;
let attn_weights = (q.matmul(&k.t()?)? * softmax_scale as f64)?;
let attn_scores = candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(&v)?;
attn_scores.reshape(q_dims_for_matmul)?.transpose(1, 2)
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
fn joint_attn(
context_qkv: &Qkv,
x_qkv: &Qkv,
num_heads: usize,
use_flash_attn: bool,
) -> Result<(Tensor, Tensor)> {
let qkv = Qkv {
q: Tensor::cat(&[&context_qkv.q, &x_qkv.q], 1)?,
k: Tensor::cat(&[&context_qkv.k, &x_qkv.k], 1)?,
v: Tensor::cat(&[&context_qkv.v, &x_qkv.v], 1)?,
};
let seqlen = qkv.q.dim(1)?;
let attn = attn(&qkv, num_heads, use_flash_attn)?;
let context_qkv_seqlen = context_qkv.q.dim(1)?;
let context_attn = attn.narrow(1, 0, context_qkv_seqlen)?;
let x_attn = attn.narrow(1, context_qkv_seqlen, seqlen - context_qkv_seqlen)?;
Ok((context_attn, x_attn))
}
fn attn(qkv: &Qkv, num_heads: usize, use_flash_attn: bool) -> Result<Tensor> {
let batch_size = qkv.q.dim(0)?;
let seqlen = qkv.q.dim(1)?;
let qkv = Qkv {
q: qkv.q.reshape((batch_size, seqlen, num_heads, ()))?,
k: qkv.k.reshape((batch_size, seqlen, num_heads, ()))?,
v: qkv.v.clone(),
};
let headdim = qkv.q.dim(D::Minus1)?;
let softmax_scale = 1.0 / (headdim as f64).sqrt();
let attn = if use_flash_attn {
flash_attn(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32, false)?
} else {
flash_compatible_attention(&qkv.q, &qkv.k, &qkv.v, softmax_scale as f32)?
};
attn.reshape((batch_size, seqlen, ()))
}
| candle/candle-transformers/src/models/mmdit/blocks.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mmdit/blocks.rs",
"repo_id": "candle",
"token_count": 8057
} | 60 |
//! Quantized MetaVoice model implementation.
//!
//! MetaVoice is a conditional text-to-speech model based on a transformer architecture.
//! This implementation provides quantization for reduced memory and compute.
//!
//! Key characteristics:
//! - Transformer-based autoregressive decoder
//! - Speaker conditioning
//! - Support for 8-bit quantization
//! - Key-value caching for efficient inference
//! - RMS normalization layers
//!
//! References:
//! - [MetaVoice Code](https://github.com/metavoiceio/metavoice)
//!
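//! A minimal decoding-step sketch (assumes a quantized checkpoint already loaded into a
//! `VarBuilder` and a speaker embedding `spk_emb`; names are illustrative):
//! ```ignore
//! let mut model = transformer::Model::new(&cfg, vb)?;
//! let logits = model.forward(&tokens, &spk_emb, 0)?; // logits for the last position only
//! model.clear_kv_cache();
//! ```
//!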
use crate::quantized_nn::{linear_b, Embedding, Linear, RmsNorm};
pub use crate::quantized_var_builder::VarBuilder;
use crate::models::metavoice::repeat_interleave;
use candle::{Module, Result, Tensor, D};
pub mod transformer {
use super::*;
type Config = crate::models::metavoice::transformer::Config;
#[derive(Debug, Clone)]
struct FeedForward {
w1: Linear,
w2: Linear,
w3: Linear,
span: tracing::Span,
}
impl FeedForward {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let i_size = cfg.intermediate_size();
let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?;
let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?;
let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?;
Ok(Self {
w1,
w2,
w3,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
}
impl Module for FeedForward {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
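// SwiGLU: silu(w1(x)) * w3(x), projected back down with w2.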
let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?;
swiglu.apply(&self.w2)
}
}
#[derive(Debug, Clone)]
struct Attention {
wqkv: Linear,
wo: Linear,
dim: usize,
kv_size: usize,
n_local_heads: usize,
head_dim: usize,
n_head: usize,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_local_heads = cfg.n_local_heads();
let head_dim = cfg.head_dim();
let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim;
let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?;
let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?;
Ok(Self {
wqkv,
wo,
dim: cfg.dim,
kv_size: n_local_heads * head_dim,
n_local_heads,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "attention"),
})
}
fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seqlen, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let q = qkv.narrow(D::Minus1, 0, self.dim)?;
let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?;
let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?;
let q = q
.reshape((b_sz, seqlen, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?;
let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = attn_weights.broadcast_add(mask)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, seqlen, self.dim))?
.apply(&self.wo)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Block {
attention: Attention,
feed_forward: FeedForward,
ffn_norm: RmsNorm,
attention_norm: RmsNorm,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?;
let ffn_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?;
let attention_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?;
Ok(Self {
attention,
feed_forward,
ffn_norm,
attention_norm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hs = xs.apply(&self.attention_norm)?;
let hs = (xs + self.attention.forward(&hs, pos, mask))?;
&hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward)
}
fn clear_kv_cache(&mut self) {
self.attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
tok_embeddings: Embedding,
pos_embeddings: Embedding,
speaker_cond_pos: Linear,
layers: Vec<Block>,
norm: RmsNorm,
output: Linear,
spk_cond_mask: Tensor,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let tok_embeddings = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?;
let pos_embeddings = Embedding::new(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?;
let speaker_cond_pos = linear_b(
cfg.speaker_emb_dim,
cfg.dim,
false,
vb.pp("speaker_cond_pos"),
)?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = Block::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("norm"))?;
let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?;
let spk_cond_mask = Tensor::cat(
&[
Tensor::ones((1, 1, cfg.dim), candle::DType::F32, vb.device())?,
Tensor::zeros((1, 1, cfg.dim), candle::DType::F32, vb.device())?,
],
0,
)?;
Ok(Self {
tok_embeddings,
pos_embeddings,
speaker_cond_pos,
layers,
norm,
output,
spk_cond_mask,
span: tracing::span!(tracing::Level::TRACE, "qtransformer"),
})
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_sz, seqlen) = xs.dims2()?;
let mask: Vec<_> = (0..seqlen)
.flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?;
let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?;
let tok_embeddings = xs.apply(&self.tok_embeddings)?;
let pos_embeddings = input_pos.apply(&self.pos_embeddings)?;
let mut xs = tok_embeddings
.broadcast_add(&pos_embeddings)?
.broadcast_add(
&spk_emb
.apply(&self.speaker_cond_pos)?
.broadcast_mul(&self.spk_cond_mask)?,
)?;
let mask = mask.to_dtype(xs.dtype())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, pos, &mask)?
}
xs.narrow(1, seqlen - 1, 1)?
.contiguous()?
.apply(&self.norm)?
.apply(&self.output)
}
}
}
| candle/candle-transformers/src/models/quantized_metavoice.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_metavoice.rs",
"repo_id": "candle",
"token_count": 5192
} | 61 |
use crate::{
models::with_tracing::{linear_b, linear_no_bias, Linear, RmsNorm},
utils::repeat_kv,
};
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{kv_cache::KvCache, Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub head_dim: usize,
pub attention_bias: bool,
pub num_key_value_heads: usize,
pub max_position_embeddings: usize,
pub sliding_window: Option<usize>,
pub max_window_layers: usize,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub rms_norm_eps: f64,
pub use_sliding_window: bool,
pub hidden_act: Activation,
}
#[derive(Debug, Clone)]
pub(crate) struct Qwen3RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl Qwen3RotaryEmbedding {
pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?.to_dtype(dtype)?,
cos: freqs.cos()?.to_dtype(dtype)?,
})
}
/// Apply RoPE (q, k shape: B x H x L x D)
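/// `offset` is the number of tokens already in the KV cache: the cos/sin tables are
/// sliced to rows `[offset, offset + seq_len)` so cached and new positions line up.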
pub(crate) fn apply(&self, q: &Tensor, k: &Tensor, offset: usize) -> Result<(Tensor, Tensor)> {
let (_, _, seq_len, _) = q.dims4()?;
let cos = self.cos.narrow(0, offset, seq_len)?;
let sin = self.sin.narrow(0, offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
pub(crate) struct Qwen3MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl Qwen3MLP {
pub(crate) fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
Ok(Self {
gate_proj: linear_no_bias(cfg.hidden_size, cfg.intermediate_size, vb.pp("gate_proj"))?,
up_proj: linear_no_bias(cfg.hidden_size, cfg.intermediate_size, vb.pp("up_proj"))?,
down_proj: linear_no_bias(cfg.intermediate_size, cfg.hidden_size, vb.pp("down_proj"))?,
act_fn: cfg.hidden_act,
})
}
}
impl Module for Qwen3MLP {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let lhs = x.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = x.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
pub(crate) struct Qwen3Attention {
// projections
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
// norms
q_norm: RmsNorm,
k_norm: RmsNorm,
// hyper params
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
// utils
rotary_emb: Arc<Qwen3RotaryEmbedding>,
kv_cache: KvCache,
}
impl Qwen3Attention {
pub(crate) fn new(
cfg: &Config,
rotary_emb: Arc<Qwen3RotaryEmbedding>,
vb: VarBuilder,
) -> Result<Self> {
if cfg.use_sliding_window {
candle::bail!("sliding window is not suppored")
}
let head_dim = cfg.head_dim;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let q_proj = linear_b(
cfg.hidden_size,
num_heads * head_dim,
cfg.attention_bias,
vb.pp("q_proj"),
)?;
let k_proj = linear_b(
cfg.hidden_size,
num_kv_heads * head_dim,
cfg.attention_bias,
vb.pp("k_proj"),
)?;
let v_proj = linear_b(
cfg.hidden_size,
num_kv_heads * head_dim,
cfg.attention_bias,
vb.pp("v_proj"),
)?;
let o_proj = linear_b(
num_heads * head_dim,
cfg.hidden_size,
cfg.attention_bias,
vb.pp("o_proj"),
)?;
let q_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("q_norm"))?;
let k_norm = RmsNorm::new(head_dim, cfg.rms_norm_eps, vb.pp("k_norm"))?;
// Necessary because the hidden_size in the config isn't always accurate
let hidden_size = head_dim * cfg.num_attention_heads;
// Initialize KV cache with 512 tokens capacity to reduce initial memory allocation.
// The cache will grow in chunks of 512 tokens when needed.
let kv_cache = KvCache::new(2, 512);
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
q_norm,
k_norm,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size,
rotary_emb,
kv_cache,
})
}
pub(crate) fn forward(
&mut self,
x: &Tensor,
attn_mask: Option<&Tensor>,
offset: usize,
) -> Result<Tensor> {
let (b, l, _) = x.dims3()?;
// 1. Proj
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
// 2. Reshape: (B, L, H, D) -> (B, H, L, D)
let q = q
.reshape((b, l, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b, l, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b, l, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
// 3. Per‑head RMSNorm
let q_flat = q.flatten(0, 2)?; // (B, H, L, D) -> (B*H*L, D) so RmsNorm acts per head
let k_flat = k.flatten(0, 2)?;
let q_flat = self.q_norm.forward(&q_flat)?;
let k_flat = self.k_norm.forward(&k_flat)?;
let q = q_flat.reshape((b, self.num_heads, l, self.head_dim))?;
let k = k_flat.reshape((b, self.num_kv_heads, l, self.head_dim))?;
// 4. RoPE
let (q, k) = self.rotary_emb.apply(&q, &k, offset)?;
// 5. Accumulate KV cache
let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?;
// 6. GQA repeat_kv
let k = repeat_kv(k, self.num_kv_groups)?;
let v = repeat_kv(v, self.num_kv_groups)?;
// 7. Attention score
let scale = 1.0 / (self.head_dim as f64).sqrt();
let mut scores = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
if let Some(m) = attn_mask {
scores = scores.broadcast_add(m)?;
}
let probs = candle_nn::ops::softmax_last_dim(&scores)?;
let ctx = probs.matmul(&v)?; // (B, H, L, D)
// 8. Output proj
ctx.transpose(1, 2)?
.reshape((b, l, self.hidden_size))?
.apply(&self.o_proj)
}
pub(crate) fn clear_kv_cache(&mut self) {
self.kv_cache.reset();
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Qwen3Attention,
mlp: Qwen3MLP,
ln1: RmsNorm,
ln2: RmsNorm,
}
impl DecoderLayer {
fn new(cfg: &Config, rotary: Arc<Qwen3RotaryEmbedding>, vb: VarBuilder) -> Result<Self> {
let self_attn = Qwen3Attention::new(cfg, rotary, vb.pp("self_attn"))?;
let mlp = Qwen3MLP::new(cfg, vb.pp("mlp"))?;
let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let ln2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
ln1,
ln2,
})
}
fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, offset: usize) -> Result<Tensor> {
let h = self.ln1.forward(x)?;
let h = self.self_attn.forward(&h, mask, offset)?;
let x = (x + h)?;
let h2 = self.ln2.forward(&x)?;
let h2 = h2.apply(&self.mlp)?;
x + h2
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache();
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
let rotary = Arc::new(Qwen3RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb.pp("model.layers");
for i in 0..cfg.num_hidden_layers {
layers.push(DecoderLayer::new(cfg, rotary.clone(), vb_l.pp(i))?);
}
Ok(Self {
embed_tokens,
layers,
norm: RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn clear_kv_cache(&mut self) {
for l in &mut self.layers {
l.clear_kv_cache();
}
}
fn causal_mask(
&self,
b: usize,
tgt: usize,
offset: usize,
sw: Option<usize>,
) -> Result<Tensor> {
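// Entry (i, j) of the (1, 1, tgt, tgt + offset) mask is 0 when key j is visible to
// query i (j <= i + offset, and within the sliding window when `sw` is set), else -inf.
// E.g. tgt = 2, offset = 1, sw = None gives [[0, 0, -inf], [0, 0, 0]].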
let minf = f32::NEG_INFINITY;
let mask: Vec<_> = (0..tgt)
.flat_map(|i| {
(0..(tgt + offset)).map(move |j| {
let past_ok = j <= i + offset;
let sw_ok = match sw {
Some(w) => (i + offset) as i64 - j as i64 <= w as i64,
None => true,
};
if past_ok && sw_ok {
0.
} else {
minf
}
})
})
.collect();
// The flat mask has tgt * (tgt + offset) elements; build it once with a unit batch
// dim and broadcast it over the batch rather than materializing one copy per entry.
Tensor::from_slice(&mask, (1, 1, tgt, tgt + offset), &self.device)?
.broadcast_as((b, 1, tgt, tgt + offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
let (b, l) = input.dims2()?;
let mut h = self.embed_tokens.forward(input)?;
let causal = if l == 1 {
None
} else {
Some(self.causal_mask(b, l, offset, None)?)
};
for layer in &mut self.layers {
h = layer.forward(&h, causal.as_ref(), offset)?;
}
self.norm.forward(&h)
}
}
#[derive(Debug, Clone)]
pub struct ModelForCausalLM {
base: Model,
lm_head: Linear,
}
impl ModelForCausalLM {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let base = Model::new(cfg, vb.clone())?;
let lm_head = if cfg.tie_word_embeddings {
Linear::from_weights(base.embed_tokens.embeddings().clone(), None)
} else {
linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
Ok(Self { base, lm_head })
}
pub fn forward(&mut self, input: &Tensor, offset: usize) -> Result<Tensor> {
let (_, l) = input.dims2()?;
self.base
.forward(input, offset)?
.narrow(1, l - 1, 1)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
self.base.clear_kv_cache();
}
}
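// A minimal autoregressive decoding sketch (assumes `cfg`, `vb`, `device`, and a prompt
// `tokens: Vec<u32>` are already available; names are illustrative):
// ```ignore
// let mut model = ModelForCausalLM::new(&cfg, vb)?;
// let input = Tensor::new(tokens.as_slice(), &device)?.unsqueeze(0)?;
// let logits = model.forward(&input, 0)?; // (1, 1, vocab_size), last position only
// // Later steps feed one sampled token at a time with offset = tokens.len() + step.
// ```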
| candle/candle-transformers/src/models/qwen3.rs/0 | {
"file_path": "candle/candle-transformers/src/models/qwen3.rs",
"repo_id": "candle",
"token_count": 6411
} | 62 |
#![allow(unused)]
//! Implementation of the Multi-Scale Neural Audio Codec (SNAC)
//!
//! See: [SNAC](https://github.com/hubertsiuzdak/snac)
//!
//! Multi-Scale Neural Audio Codec (SNAC) compresses audio into discrete codes at a low
//! bitrate. For more information, read the paper: <https://arxiv.org/abs/2410.14411>
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{
linear_b, Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, LayerNorm, Linear,
VarBuilder,
};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub sampling_rate: usize,
pub encoder_dim: usize,
pub encoder_rates: Vec<usize>,
pub decoder_dim: usize,
pub decoder_rates: Vec<usize>,
pub attn_window_size: Option<usize>,
pub codebook_size: usize,
pub codebook_dim: usize,
pub vq_strides: Vec<usize>,
pub noise: bool,
pub depthwise: bool,
}
// Equivalent to torch.repeat_interleave
pub fn repeat_interleave<D: candle::shape::Dim>(
img: &Tensor,
repeats: usize,
dim: D,
) -> Result<Tensor> {
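// E.g. repeats = 3 on dim 1 maps [[1, 2], [3, 4]] (shape (2, 2)) to
// [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]] (shape (2, 6)).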
if repeats == 1 {
return Ok(img.clone());
}
let dim = dim.to_index(img.shape(), "repeat_interleave")?;
let img = img.unsqueeze(dim + 1)?;
let mut dims = img.dims().to_vec();
dims[dim + 1] = repeats;
img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
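// Weight normalization (https://arxiv.org/abs/1602.07868): the effective weight is
// g * v / ||v||, with the norm taken over the (in_channels, kernel) dimensions of v.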
pub fn conv1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "parametrizations.weight.original0")?;
let weight_v = {
let name = "parametrizations.weight.original1";
match vb.get((out_c, in_c, kernel_size), name) {
Ok(v) => v,
Err(_) => vb.get((out_c, 1, kernel_size), name)?,
}
};
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = vb.get(out_c, "bias")?;
Ok(Conv1d::new(weight, Some(bias), config))
}
pub fn conv1d_weight_norm_no_bias(
in_c: usize,
out_c: usize,
kernel_size: usize,
config: candle_nn::Conv1dConfig,
vb: VarBuilder,
) -> Result<Conv1d> {
let weight_g = vb.get((out_c, 1, 1), "parametrizations.weight.original0")?;
let weight_v = {
let name = "parametrizations.weight.original1";
match vb.get((out_c, in_c, kernel_size), name) {
Ok(v) => v,
Err(_) => vb.get((out_c, 1, kernel_size), name)?,
}
};
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
Ok(Conv1d::new(weight, None, config))
}
pub fn conv_transpose1d_weight_norm(
in_c: usize,
out_c: usize,
kernel_size: usize,
bias: bool,
config: candle_nn::ConvTranspose1dConfig,
vb: VarBuilder,
) -> Result<ConvTranspose1d> {
let weight_g = vb.get((in_c, 1, 1), "parametrizations.weight.original0")?;
let weight_v = vb.get(
(in_c, out_c, kernel_size),
"parametrizations.weight.original1",
)?;
let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?;
let bias = if bias {
Some(vb.get(out_c, "bias")?)
} else {
None
};
Ok(ConvTranspose1d::new(weight, bias, config))
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/attention.py
#[allow(unused)]
#[derive(Debug, Clone)]
struct SinusoidalEmbeddings {
inv_freq: Tensor,
scale: Tensor,
scale_base: f32,
use_xpos: bool,
}
impl SinusoidalEmbeddings {
fn new(dim: usize, scale_base: f32, use_xpos: bool, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10_000f32.powf(i as f32 / dim as f32))
.collect();
let len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, len, dev)?.to_dtype(DType::F32)?;
let scale: Vec<_> = (0..dim)
.step_by(2)
.map(|i| (i as f32 + 0.4 * dim as f32) / (1.4 * dim as f32))
.collect();
let scale = Tensor::from_vec(scale, len, dev)?.to_dtype(DType::F32)?;
Ok(Self {
inv_freq,
scale,
scale_base,
use_xpos,
})
}
}
#[allow(unused)]
#[derive(Debug, Clone)]
struct LocalMHA {
norm: LayerNorm,
to_qkv: Linear,
to_out: Linear,
num_heads: usize,
head_dim: usize,
rel_pos: Option<SinusoidalEmbeddings>,
}
impl LocalMHA {
fn new(
dim: usize,
window_size: usize,
dim_head: usize,
use_rotary_pos_emb: bool,
vb: VarBuilder,
) -> Result<Self> {
let norm = candle_nn::layer_norm(dim, 1e-5, vb.pp("norm"))?;
let to_qkv = linear_b(dim, dim * 3, false, vb.pp("to_qkv"))?;
let to_out = linear_b(dim, dim, false, vb.pp("to_out"))?;
let rel_pos = if use_rotary_pos_emb {
let rel_pos =
SinusoidalEmbeddings::new(dim_head, window_size as f32 / 2.0, false, vb.device())?;
Some(rel_pos)
} else {
None
};
Ok(Self {
norm,
to_qkv,
to_out,
rel_pos,
num_heads: dim / dim_head,
head_dim: dim_head,
})
}
}
impl Module for LocalMHA {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, c, t) = xs.dims3()?;
let residual = xs.clone();
let xs = xs.transpose(1, 2)?.apply(&self.norm)?;
let qkv = xs.apply(&self.to_qkv)?;
let q = qkv.narrow(D::Minus1, 0, c)?;
let k = qkv.narrow(D::Minus1, c, c)?;
let v = qkv.narrow(D::Minus1, 2 * c, c)?;
let q = q
.reshape((b, t, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b, t, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let v = v
.reshape((b, t, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let (q, k) = match self.rel_pos {
Some(_) => todo!(),
None => (q, k),
};
let out = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
// Non-causal attention
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&v)?
};
let out = out
.transpose(1, 2)?
.reshape((b, t, self.num_heads * self.head_dim))?
.apply(&self.to_out)?;
out.transpose(1, 2)? + residual
}
}
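// Snake activation (https://arxiv.org/abs/2006.08195): x + (1 / alpha) * sin^2(alpha * x),
// with a learned per-channel alpha and a small epsilon guarding the division.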
#[derive(Debug, Clone)]
struct Snake1d {
alpha: Tensor,
}
impl Snake1d {
pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
let alpha = vb.get((1, channels, 1), "alpha")?;
Ok(Self { alpha })
}
}
impl Module for Snake1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs_shape = xs.shape();
let xs = xs.flatten_from(2)?;
let sin = self.alpha.broadcast_mul(&xs)?.sin()?;
let sin = (&sin * &sin)?;
(xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape)
}
}
#[derive(Debug, Clone)]
struct ResidualUnit {
snake1: Snake1d,
conv1: Conv1d,
snake2: Snake1d,
conv2: Conv1d,
}
impl ResidualUnit {
fn new(
dim: usize,
dilation: usize,
kernel: usize,
groups: usize,
vb: VarBuilder,
) -> Result<Self> {
let pad = ((kernel - 1) * dilation) / 2;
let vb = vb.pp("block");
let snake1 = Snake1d::new(dim, vb.pp(0))?;
let cfg1 = Conv1dConfig {
dilation,
padding: pad,
groups,
..Default::default()
};
let conv1 = conv1d_weight_norm(dim, dim, 7, cfg1, vb.pp(1))?;
let snake2 = Snake1d::new(dim, vb.pp(2))?;
let conv2 = conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?;
Ok(Self {
snake1,
conv1,
snake2,
conv2,
})
}
}
impl Module for ResidualUnit {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs
.apply(&self.snake1)?
.apply(&self.conv1)?
.apply(&self.snake2)?
.apply(&self.conv2)?;
let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2;
if pad > 0 {
&ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?)
} else {
ys + xs
}
}
}
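// Injects input-dependent noise: x + conv1x1(x) * n, with n ~ N(0, 1) per time step.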
#[derive(Debug, Clone)]
struct NoiseBlock {
linear: Conv1d,
}
impl NoiseBlock {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let linear = conv1d_weight_norm_no_bias(dim, dim, 1, Default::default(), vb.pp("linear"))?;
Ok(Self { linear })
}
}
impl Module for NoiseBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, _c, t) = xs.dims3()?;
let noise = Tensor::randn(0f32, 1f32, (b, 1, t), xs.device())?;
let h = xs.apply(&self.linear)?;
let n = noise.broadcast_mul(&h)?;
let xs = (xs + n)?;
Ok(xs)
}
}
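// Decoder stage: snake -> transposed conv (upsampling by `stride`) -> optional
// noise block -> three residual units with dilations 1, 3 and 9.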
#[derive(Debug, Clone)]
struct DecoderBlock {
snake1: Snake1d,
conv_tr1: ConvTranspose1d,
noise: Option<NoiseBlock>,
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
}
impl DecoderBlock {
fn new(
in_dim: usize,
out_dim: usize,
stride: usize,
noise: bool,
groups: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("block");
let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
let cfg = ConvTranspose1dConfig {
stride,
padding: stride.div_ceil(2),
output_padding: stride % 2,
..Default::default()
};
let conv_tr1 =
conv_transpose1d_weight_norm(in_dim, out_dim, 2 * stride, true, cfg, vb.pp(1))?;
let (n, noise) = if noise {
let noise = NoiseBlock::new(out_dim, vb.pp(2))?;
(1, Some(noise))
} else {
(0, None)
};
let res1 = ResidualUnit::new(out_dim, 1, 7, groups, vb.pp(2 + n))?;
let res2 = ResidualUnit::new(out_dim, 3, 7, groups, vb.pp(3 + n))?;
let res3 = ResidualUnit::new(out_dim, 9, 7, groups, vb.pp(4 + n))?;
Ok(Self {
snake1,
conv_tr1,
noise,
res1,
res2,
res3,
})
}
}
impl Module for DecoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.snake1)?
.apply(&self.conv_tr1)?
.apply(&self.noise.as_ref())?
.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)
}
}
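// Encoder stage: three residual units with dilations 1, 3 and 9, then a snake
// activation and a conv that downsamples the time axis by `stride`.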
#[derive(Debug, Clone)]
struct EncoderBlock {
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
snake1: Snake1d,
conv1: Conv1d,
}
impl EncoderBlock {
fn new(
out_dim: usize,
in_dim: Option<usize>,
stride: usize,
groups: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("block");
let in_dim = in_dim.unwrap_or(out_dim / 2);
let res1 = ResidualUnit::new(in_dim, 1, 7, groups, vb.pp(0))?;
let res2 = ResidualUnit::new(in_dim, 3, 7, groups, vb.pp(1))?;
let res3 = ResidualUnit::new(in_dim, 9, 7, groups, vb.pp(2))?;
let snake1 = Snake1d::new(in_dim, vb.pp(3))?;
let cfg1 = Conv1dConfig {
stride,
padding: stride.div_ceil(2),
..Default::default()
};
let conv1 = conv1d_weight_norm(in_dim, out_dim, 2 * stride, cfg1, vb.pp(4))?;
Ok(Self {
res1,
res2,
res3,
snake1,
conv1,
})
}
}
impl candle::Module for EncoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)?
.apply(&self.snake1)?
.apply(&self.conv1)
}
}
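// Full encoder: input conv, a stack of downsampling EncoderBlocks (doubling the
// channel count at each stage), optional local attention, and a final conv.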
#[derive(Debug, Clone)]
pub struct Encoder {
conv1: Conv1d,
blocks: Vec<EncoderBlock>,
local_mha: Option<LocalMHA>,
conv2: Conv1d,
}
impl candle::Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv1)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
// Note: `local_mha` is stored but not applied here; the rotary attention path it
// depends on is unimplemented (see LocalMHA::forward).
xs.apply(&self.conv2)
}
}
impl Encoder {
fn new(
mut d_model: usize,
strides: &[usize],
depthwise: bool,
attn_window_size: Option<usize>,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("block");
let mut idx = 0;
let cfg1 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(idx))?;
idx += 1;
let mut blocks = Vec::with_capacity(strides.len());
for &stride in strides.iter() {
d_model *= 2;
let groups = if depthwise { d_model / 2 } else { 1 };
let block = EncoderBlock::new(d_model, None, stride, groups, vb.pp(idx))?;
idx += 1;
blocks.push(block)
}
let local_mha = match attn_window_size {
Some(w) => {
let mha = LocalMHA::new(d_model, w, 64, true, vb.pp(idx))?;
idx += 1;
Some(mha)
}
None => None,
};
let groups = if depthwise { d_model } else { 1 };
let cfg2 = Conv1dConfig {
padding: 3,
groups,
..Default::default()
};
let conv2 = conv1d_weight_norm(d_model, d_model, 7, cfg2, vb.pp(idx))?;
idx += 1;
Ok(Self {
conv1,
blocks,
local_mha,
conv2,
})
}
}
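// The decoder mirrors the encoder: an input conv (split into a depthwise +
// pointwise pair when `depthwise` is set), optional attention, upsampling
// DecoderBlocks, and a final conv projecting to `d_out` waveform channels.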
#[derive(Debug, Clone)]
enum ConvInit {
Depthwise(Conv1d, Conv1d),
Standard(Conv1d),
}
#[derive(Debug, Clone)]
pub struct Decoder {
conv1: ConvInit,
local_mha: Option<LocalMHA>,
blocks: Vec<DecoderBlock>,
snake1: Snake1d,
conv2: Conv1d,
}
impl Decoder {
#[allow(clippy::too_many_arguments)]
fn new(
in_c: usize,
mut channels: usize,
rates: &[usize],
noise: bool,
depthwise: bool,
attn_window_size: Option<usize>,
d_out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("model");
let mut idx = 0;
let pad3 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = if depthwise {
let cfg1 = Conv1dConfig {
padding: 3,
groups: in_c,
..Default::default()
};
let conv1 = conv1d_weight_norm(in_c, in_c, 7, cfg1, vb.pp(idx))?;
idx += 1;
let conv2 = conv1d_weight_norm(in_c, channels, 1, Default::default(), vb.pp(idx))?;
idx += 1;
ConvInit::Depthwise(conv1, conv2)
} else {
let conv1 = conv1d_weight_norm(in_c, channels, 7, pad3, vb.pp(idx))?;
idx += 1;
ConvInit::Standard(conv1)
};
let mut blocks = Vec::with_capacity(rates.len());
let local_mha = match attn_window_size {
Some(w) => {
let mha = LocalMHA::new(channels, w, 64, true, vb.pp(idx))?;
idx += 1;
Some(mha)
}
None => None,
};
for stride in rates.iter() {
let groups = if depthwise { channels / 2 } else { 1 };
let block =
DecoderBlock::new(channels, channels / 2, *stride, noise, groups, vb.pp(idx))?;
idx += 1;
channels /= 2;
blocks.push(block)
}
let snake1 = Snake1d::new(channels, vb.pp(idx))?;
idx += 1;
let conv2 = conv1d_weight_norm(channels, d_out, 7, pad3, vb.pp(idx))?;
idx += 1;
Ok(Self {
conv1,
local_mha,
blocks,
snake1,
conv2,
})
}
}
impl candle::Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = match &self.conv1 {
ConvInit::Standard(c) => xs.apply(c)?,
ConvInit::Depthwise(c1, c2) => xs.apply(c1)?.apply(c2)?,
};
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
// As in the encoder, `local_mha` is stored but not applied in the forward pass.
xs.apply(&self.snake1)?.apply(&self.conv2)
}
}
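// L2-normalizes along dim 1 so that the codebook lookup below amounts to a
// cosine-similarity search.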
fn normalize(v: &Tensor) -> Result<Tensor> {
v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/vq.py
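// A single vector quantizer: projects into codebook space, snaps each vector to its
// nearest codebook entry, and projects back; `stride` optionally average-pools the
// input before quantization and repeats the output afterwards.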
#[allow(unused)]
#[derive(Clone, Debug)]
struct VectorQuantizer {
in_proj: Conv1d,
out_proj: Conv1d,
codebook: candle_nn::Embedding,
stride: usize,
}
impl VectorQuantizer {
fn new(
in_dim: usize,
cb_size: usize,
cb_dim: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Self> {
let in_proj = conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?;
let out_proj =
conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?;
let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?;
Ok(Self {
in_proj,
out_proj,
codebook,
stride,
})
}
fn decode_latents(&self, latents: &Tensor) -> Result<(Tensor, Tensor)> {
let (b, d, t) = latents.dims3()?;
let encodings = latents.transpose(1, 2)?.reshape((b * t, d))?;
let encodings = normalize(&encodings)?;
let codebook = normalize(self.codebook.embeddings())?;
// Squared Euclidean distance ||e||^2 - 2 e.c^T + ||c||^2 to every codebook entry;
// with L2-normalized rows this is equivalent to maximizing cosine similarity.
// (The original parenthesization doubled the ||e||^2 term as well; harmless for
// the argmin since the rows are normalized, but misleading.)
let dist = encodings
.sqr()?
.sum_keepdim(1)?
.broadcast_sub(&(encodings.matmul(&codebook.t()?)? * 2.0)?)?
.broadcast_add(&codebook.sqr()?.sum_keepdim(1)?.t()?)?;
let indices = dist.argmin(1)?.reshape((b, ()))?;
let z_q = self.decode_code(&indices)?;
Ok((z_q, indices))
}
fn encode(&self, z: &Tensor) -> Result<(Tensor, Tensor)> {
let z = if self.stride > 1 {
let (b, c, t) = z.dims3()?;
z.reshape((b, c, 1, t))?
.avg_pool2d((1, self.stride))?
.squeeze(2)?
} else {
z.clone()
};
let z_e = z.apply(&self.in_proj)?;
let (z_q, indices) = self.decode_latents(&z_e)?;
let z_q = z_q.apply(&self.out_proj)?;
let z_q = if self.stride > 1 {
repeat_interleave(&z_q, self.stride, D::Minus1)?
} else {
z_q
};
Ok((z_q, indices))
}
fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> {
embed_id.apply(&self.codebook)
}
fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> {
self.embed_code(embed_id)?.transpose(1, 2)
}
}
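// Residual VQ: each quantizer encodes the residual left by the previous ones, and
// the decoded contributions are summed back together.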
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
quantizers: Vec<VectorQuantizer>,
}
impl ResidualVectorQuantizer {
fn new(
input_dim: usize,
cb_size: usize,
cb_dim: usize,
vq_strides: &[usize],
vb: VarBuilder,
) -> Result<Self> {
let vb = &vb.pp("quantizers");
let quantizers = vq_strides
.iter()
.enumerate()
.map(|(i, stride)| VectorQuantizer::new(input_dim, cb_size, cb_dim, *stride, vb.pp(i)))
.collect::<Result<Vec<_>>>()?;
Ok(Self { quantizers })
}
fn encode(&self, z: &Tensor) -> Result<(Tensor, Vec<Tensor>)> {
let mut residual = z.clone();
let mut z_q = z.zeros_like()?;
let mut codes = Vec::with_capacity(self.quantizers.len());
for quantizer in self.quantizers.iter() {
let (z_q_i, indices_i) = quantizer.encode(&residual)?;
z_q = (z_q + &z_q_i)?;
residual = (residual - &z_q_i)?;
codes.push(indices_i)
}
Ok((z_q, codes))
}
#[allow(clippy::wrong_self_convention)]
fn from_codes(&self, codes: &[&Tensor]) -> Result<Tensor> {
let mut sum = None;
for (quantizer, codes) in self.quantizers.iter().zip(codes.iter()) {
let z_p_i = quantizer.decode_code(codes)?;
let z_q_i = z_p_i.apply(&quantizer.out_proj)?;
let z_q_i = repeat_interleave(&z_q_i, quantizer.stride, D::Minus1)?;
let s = match sum {
None => z_q_i,
Some(s) => (s + z_q_i)?,
};
sum = Some(s)
}
match sum {
Some(s) => Ok(s),
None => candle::bail!("empty codebooks"),
}
}
}
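// Euclid's algorithm; gcd/lcm are used to compute the padding multiple in
// Model::preprocess.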
fn gcd(mut a: usize, mut b: usize) -> usize {
while b != 0 {
let t = b;
b = a % b;
a = t;
}
a
}
fn lcm(a: usize, b: usize) -> usize {
a / gcd(a, b) * b
}
// https://github.com/hubertsiuzdak/snac/blob/main/snac/snac.py
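// Complete SNAC model: encoder -> residual vector quantizer -> decoder. `hop_length`
// is the total time-downsampling factor of the encoder.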
#[derive(Debug, Clone)]
pub struct Model {
pub encoder: Encoder,
pub quantizer: ResidualVectorQuantizer,
pub decoder: Decoder,
pub hop_length: usize,
pub config: Config,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(
cfg.encoder_dim,
&cfg.encoder_rates,
cfg.depthwise,
cfg.attn_window_size,
vb.pp("encoder"),
)?;
let latent_dim = cfg.encoder_dim * 2usize.pow(cfg.encoder_rates.len() as u32);
let quantizer = ResidualVectorQuantizer::new(
latent_dim,
cfg.codebook_size,
cfg.codebook_dim,
&cfg.vq_strides,
vb.pp("quantizer"),
)?;
let decoder = Decoder::new(
latent_dim,
cfg.decoder_dim,
&cfg.decoder_rates,
cfg.noise,
cfg.depthwise,
cfg.attn_window_size,
/* d_out */ 1,
vb.pp("decoder"),
)?;
let hop_length = cfg.encoder_rates.iter().product::<usize>();
Ok(Self {
encoder,
decoder,
quantizer,
config: cfg.clone(),
hop_length,
})
}
fn preprocess(&self, audio_data: &Tensor) -> Result<Tensor> {
let len = audio_data.dim(D::Minus1)?;
let lcm = lcm(
self.config.vq_strides[0],
self.config.attn_window_size.unwrap_or(1),
);
let pad_to = self.hop_length * lcm;
let right_pad = len.div_ceil(pad_to) * pad_to - len;
let audio_data = audio_data.pad_with_zeros(D::Minus1, 0, right_pad)?;
Ok(audio_data)
}
pub fn encode(&self, audio_data: &Tensor) -> Result<Vec<Tensor>> {
let audio_data = self.preprocess(audio_data)?;
let z = self.encoder.forward(&audio_data)?;
let (_, codes) = self.quantizer.encode(&z)?;
Ok(codes)
}
pub fn decode(&self, audio_codes: &[&Tensor]) -> Result<Tensor> {
let audio_values = self.quantizer.from_codes(audio_codes)?;
audio_values.apply(&self.decoder)
}
pub fn config(&self) -> &Config {
&self.config
}
pub fn num_codebooks(&self) -> usize {
self.quantizer.quantizers.len()
}
}
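// Minimal usage sketch (assumptions: a `Config` matching the checkpoint, a mono
// waveform tensor of shape (batch, 1, time), and a hypothetical weights path):
//
//   let vb = unsafe {
//       VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
//   };
//   let model = Model::new(&config, vb)?;
//   let codes = model.encode(&audio)?; // one tensor of indices per codebook
//   let code_refs: Vec<&Tensor> = codes.iter().collect();
//   let reconstructed = model.decode(&code_refs)?;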
| candle/candle-transformers/src/models/snac.rs/0 | {
"file_path": "candle/candle-transformers/src/models/snac.rs",
"repo_id": "candle",
"token_count": 12742
} | 63 |
use candle::{DType, Module, Result, Tensor, D};
use candle_nn::VarBuilder;

use super::attention_processor::Attention;
// https://github.com/huggingface/diffusers/blob/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py#L22
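// LayerNorm without learnable weights, applied over the channel dim of NCHW tensors
// (permute to NHWC, normalize the last dim, permute back); the statistics are
// computed in f32 for f16/bf16 inputs.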
#[derive(Debug)]
pub struct WLayerNorm {
eps: f64,
}
impl WLayerNorm {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
impl Module for WLayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.permute((0, 2, 3, 1))?;
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)?
.permute((0, 3, 1, 2))
}
}
#[derive(Debug)]
pub struct LayerNormNoWeights {
eps: f64,
}
impl LayerNormNoWeights {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
impl Module for LayerNormNoWeights {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)
}
}
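// FiLM-style timestep conditioning: a linear layer maps the timestep embedding to a
// per-channel scale and shift, applied as x * (1 + a) + b.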
#[derive(Debug)]
pub struct TimestepBlock {
mapper: candle_nn::Linear,
}
impl TimestepBlock {
pub fn new(c: usize, c_timestep: usize, vb: VarBuilder) -> Result<Self> {
let mapper = candle_nn::linear(c_timestep, c * 2, vb.pp("mapper"))?;
Ok(Self { mapper })
}
pub fn forward(&self, xs: &Tensor, t: &Tensor) -> Result<Tensor> {
let ab = self
.mapper
.forward(t)?
.unsqueeze(2)?
.unsqueeze(3)?
.chunk(2, 1)?;
xs.broadcast_mul(&(&ab[0] + 1.)?)?.broadcast_add(&ab[1])
}
}
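// Global response normalization from ConvNeXt V2 (https://arxiv.org/abs/2301.00808),
// operating on NHWC tensors: features are rescaled by their spatial L2 norm relative
// to the mean of those norms across channels, with learned gamma/beta and a residual.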
#[derive(Debug)]
pub struct GlobalResponseNorm {
gamma: Tensor,
beta: Tensor,
}
impl GlobalResponseNorm {
pub fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let gamma = vb.get((1, 1, 1, dim), "gamma")?;
let beta = vb.get((1, 1, 1, dim), "beta")?;
Ok(Self { gamma, beta })
}
}
impl Module for GlobalResponseNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let agg_norm = xs.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let stand_div_norm =
agg_norm.broadcast_div(&(agg_norm.mean_keepdim(D::Minus1)? + 1e-6)?)?;
xs.broadcast_mul(&stand_div_norm)?
.broadcast_mul(&self.gamma)?
.broadcast_add(&self.beta)?
+ xs
}
}
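// ConvNeXt-style residual block: depthwise conv (with an optional skip input
// concatenated along channels), W-layer-norm, then a channelwise MLP with GELU
// and global response normalization.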
#[derive(Debug)]
pub struct ResBlock {
depthwise: candle_nn::Conv2d,
norm: WLayerNorm,
channelwise_lin1: candle_nn::Linear,
channelwise_grn: GlobalResponseNorm,
channelwise_lin2: candle_nn::Linear,
}
impl ResBlock {
pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
padding: ksize / 2,
groups: c,
..Default::default()
};
let depthwise = candle_nn::conv2d(c + c_skip, c, ksize, cfg, vb.pp("depthwise"))?;
let norm = WLayerNorm::new(c)?;
let channelwise_lin1 = candle_nn::linear(c, c * 4, vb.pp("channelwise.0"))?;
let channelwise_grn = GlobalResponseNorm::new(c * 4, vb.pp("channelwise.2"))?;
let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?;
Ok(Self {
depthwise,
norm,
channelwise_lin1,
channelwise_grn,
channelwise_lin2,
})
}
pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> {
let x_res = xs;
let xs = match x_skip {
None => xs.clone(),
Some(x_skip) => Tensor::cat(&[xs, x_skip], 1)?,
};
let xs = xs
.apply(&self.depthwise)?
.apply(&self.norm)?
.permute((0, 2, 3, 1))?;
let xs = xs
.apply(&self.channelwise_lin1)?
.gelu_erf()?
.apply(&self.channelwise_grn)?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_res
}
}
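// Attention block; when `self_attn` is set, the normalized input is flattened and
// prepended to the projected conditioning so the block attends over both.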
#[derive(Debug)]
pub struct AttnBlock {
self_attn: bool,
norm: WLayerNorm,
attention: Attention,
kv_mapper_lin: candle_nn::Linear,
}
impl AttnBlock {
pub fn new(
c: usize,
c_cond: usize,
nhead: usize,
self_attn: bool,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let norm = WLayerNorm::new(c)?;
let attention = Attention::new(c, nhead, c / nhead, use_flash_attn, vb.pp("attention"))?;
let kv_mapper_lin = candle_nn::linear(c_cond, c, vb.pp("kv_mapper.1"))?;
Ok(Self {
self_attn,
norm,
attention,
kv_mapper_lin,
})
}
pub fn forward(&self, xs: &Tensor, kv: &Tensor) -> Result<Tensor> {
let kv = candle_nn::ops::silu(kv)?.apply(&self.kv_mapper_lin)?;
let norm_xs = self.norm.forward(xs)?;
let kv = if self.self_attn {
let (b_size, channel, _, _) = xs.dims4()?;
let norm_xs = norm_xs.reshape((b_size, channel, ()))?.transpose(1, 2)?;
Tensor::cat(&[&norm_xs, &kv], 1)?.contiguous()?
} else {
kv
};
xs + self.attention.forward(&norm_xs, &kv)
}
}
| candle/candle-transformers/src/models/wuerstchen/common.rs/0 | {
"file_path": "candle/candle-transformers/src/models/wuerstchen/common.rs",
"repo_id": "candle",
"token_count": 3219
} | 64 |