# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester:
def __init__(self, parent):
self.parent = parent
def prepare_feat_extract_dict(self):
return {}
def get_html_strings():
html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
html_string_2 = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
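# The extractor needs bs4 (BeautifulSoup) to parse HTML into nodes and xpaths; when bs4 is
# missing, the guarded import above leaves the class unavailable and @require_bs4 skips the tests.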
feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
def setUp(self):
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_call(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class()
# Test not batched input
html_string = get_html_strings()[0]
encoding = feature_extractor(html_string)
# fmt: off
expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes, expected_nodes)
self.assertEqual(encoding.xpaths, expected_xpaths)
# Test batched
html_strings = get_html_strings()
encoding = feature_extractor(html_strings)
# fmt: off
expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
# fmt: on
self.assertEqual(len(encoding.nodes), 2)
self.assertEqual(len(encoding.xpaths), 2)
self.assertEqual(encoding.nodes, expected_nodes)
self.assertEqual(encoding.xpaths, expected_xpaths)
# Source: transformers/tests/models/markuplm/test_feature_extraction_markuplm.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from packaging import version
from transformers import AutoTokenizer, MobileBertConfig, MobileBertForMaskedLM, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
embedding_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return MobileBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
embedding_size=self.embedding_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def create_and_check_mobilebert_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_mobilebert_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_mobilebert_for_next_sequence_prediction(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = MobileBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_mobilebert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = MobileBertForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_mobilebert_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = MobileBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_mobilebert_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = MobileBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
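# Multiple-choice models expect inputs of shape (batch_size, num_choices, seq_length), so each
# tensor is repeated along a new choice dimension.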
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
# TODO (@SunMarc): Fix me
@unittest.skip(reason="It's broken.")
def test_resize_tokens_embeddings(self):
super().test_resize_tokens_embeddings()
def setUp(self):
self.model_tester = MobileBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_mobilebert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
def test_for_next_sequence_prediction(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
return torch.tensor(
tok_lst,
dtype=torch.long,
device=torch_device,
)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
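# 101 and 102 are the [CLS] and [SEP] token ids in the (Mobile)BERT vocabulary.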
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 9, 512))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
],
device=torch_device,
)
# MobileBERT activations span roughly 1e0 to 1e8. Even a minuscule relative difference on a value of
# order 1e8 produces an absolute difference of order 1, so an absolute-difference check is meaningless.
# Instead, we divide the expected result by the actual result, which should be ~1, and check that the
# ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
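# With TOLERANCE = 1e-3, this accepts ratios in (0.999, 1.001), i.e. at most ~0.1% relative deviation.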
lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound)
@pytest.mark.torch_export_test
@slow
def test_export(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
mobilebert_model = "google/mobilebert-uncased"
device = "cpu"
attn_implementation = "eager"
max_length = 512
tokenizer = AutoTokenizer.from_pretrained(mobilebert_model)
inputs = tokenizer(
f"the man worked as a {tokenizer.mask_token}.",
return_tensors="pt",
padding="max_length",
max_length=max_length,
)
model = MobileBertForMaskedLM.from_pretrained(
mobilebert_model,
device_map=device,
attn_implementation=attn_implementation,
)
logits = model(**inputs).logits
eg_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices)
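# Index 6 is the [MASK] position: [CLS] the man worked as a [MASK] . [SEP], followed by padding.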
self.assertEqual(eg_predicted_mask.split(), ["carpenter", "waiter", "mechanic", "teacher", "clerk"])
exported_program = torch.export.export(
model,
args=(inputs["input_ids"],),
kwargs={"attention_mask": inputs["attention_mask"]},
strict=True,
)
result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
ep_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices)
self.assertEqual(eg_predicted_mask, ep_predicted_mask)
# Source: transformers/tests/models/mobilebert/test_modeling_mobilebert.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from packaging import version
from transformers import AutoTokenizer, ModernBertDecoderConfig, is_torch_available
from transformers.testing_utils import (
require_torch,
slow,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_modeling_common import _config_zero_init
if is_torch_available():
import torch
from transformers import (
ModernBertDecoderForCausalLM,
ModernBertDecoderForSequenceClassification,
ModernBertDecoderModel,
)
class ModernBertDecoderModelTester(CausalLMModelTester):
config_class = ModernBertDecoderConfig
if is_torch_available():
base_model_class = ModernBertDecoderModel
causal_lm_class = ModernBertDecoderForCausalLM
@require_torch
class ModernBertDecoderModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(ModernBertDecoderModel, ModernBertDecoderForCausalLM, ModernBertDecoderForSequenceClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ModernBertDecoderModel,
"text-generation": ModernBertDecoderForCausalLM,
"text-classification": ModernBertDecoderForSequenceClassification,
}
if is_torch_available()
else {}
)
test_head_masking = False
test_pruning = False
model_tester_class = ModernBertDecoderModelTester
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
# The classifier.weight from ModernBertDecoderForSequenceClassification
# is initialized without `initializer_range`, so it's not set to ~0 via the _config_zero_init
if param.requires_grad and not (
name == "classifier.weight" and model_class in [ModernBertDecoderForSequenceClassification]
):
data = torch.flatten(param.data)
n_elements = torch.numel(data)
# skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in
# https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332
n_elements_to_skip_on_each_side = int(n_elements * 0.025)
data_to_check = torch.sort(data).values
if n_elements_to_skip_on_each_side > 0:
data_to_check = data_to_check[n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side]
self.assertIn(
((data_to_check.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@slow
@require_torch
class ModernBertDecoderIntegrationTest(unittest.TestCase):
def test_inference_causal_lm(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
inputs = tokenizer("Paris is the capital of", return_tensors="pt")
with torch.no_grad():
output = model(**inputs)[0]
expected_shape = torch.Size((1, 6, model.config.vocab_size))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-8.0183, -7.1578, -0.4453], [-6.2909, -6.1557, 4.9063], [-6.7689, -5.8068, 6.1078]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_no_head(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = ModernBertDecoderModel.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
inputs = tokenizer("Paris is the capital of", return_tensors="pt")
with torch.no_grad():
output = model(**inputs)[0]
expected_shape = torch.Size((1, 6, model.config.hidden_size))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0306, -0.0115, 0.0007], [-0.2485, -0.1381, 0.0872], [0.3133, -0.1777, 0.1667]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_generation(self):
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
inputs = tokenizer("The weather today is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10, do_sample=False)
output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
# Check that we got some reasonable output
self.assertEqual(len(output_text), 1)
self.assertTrue(len(output_text[0]) > len("The weather today is"))
def test_sliding_window_long_context(self):
"""
Test that ModernBertDecoder works with sliding window attention for longer sequences.
"""
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec", attn_implementation="eager")
tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
# Create a longer input to test sliding window attention
long_input = "This is a test. " * 50 # Repeat to make it longer
inputs = tokenizer(long_input, return_tensors="pt", truncation=True, max_length=512)
outputs = model.generate(**inputs, max_new_tokens=20, do_sample=False)
# Check that generation worked with longer context
self.assertEqual(outputs.shape[0], 1)
self.assertGreater(outputs.shape[1], inputs["input_ids"].shape[1])
def test_sequence_classification(self):
"""
Test that ModernBertDecoderForSequenceClassification works correctly.
"""
if version.parse(torch.__version__) < version.parse("2.4.0"):
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = ModernBertDecoderForSequenceClassification.from_pretrained(
"blab-jhu/test-32m-dec", num_labels=2, attn_implementation="eager"
)
tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")
# Test with sample input
inputs = tokenizer("This is a positive example.", return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs)
# Check output shape
expected_shape = (1, 2) # batch_size=1, num_labels=2
self.assertEqual(outputs.logits.shape, expected_shape)
# Test with labels
labels = torch.tensor([1])
outputs_with_loss = model(**inputs, labels=labels)
# Check that loss is computed
self.assertIsNotNone(outputs_with_loss.loss)
self.assertTrue(isinstance(outputs_with_loss.loss.item(), float))
# Source: transformers/tests/models/modernbert_decoder/test_modeling_modernbert_decoder.py
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Musicgen model."""
import copy
import inspect
import math
import tempfile
import unittest
import numpy as np
import pytest
from pytest import mark
from transformers import (
EncodecConfig,
MusicgenConfig,
MusicgenDecoderConfig,
MusicgenProcessor,
PretrainedConfig,
T5Config,
)
from transformers.testing_utils import (
Expectations,
get_device_properties,
is_torch_available,
require_flash_attn,
require_torch,
require_torch_accelerator,
require_torch_fp16,
require_torch_gpu,
slow,
torch_device,
)
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, sdpa_kernel
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MusicgenForCausalLM,
MusicgenForConditionalGeneration,
MusicgenModel,
set_seed,
)
def _config_zero_init(config):
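# Recursively shrink every init std/range hyperparameter (including nested sub-configs) to ~0 so
# that freshly initialized weights are essentially deterministic for the initialization tests below.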
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__:
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(configs_no_init, key, 1e-10)
if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
setattr(configs_no_init, key, no_init_subconfig)
return configs_no_init
def prepare_musicgen_decoder_inputs_dict(
config,
input_ids,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
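# The decoder flattens codebooks into the batch dimension: input_ids has shape
# (batch_size * num_codebooks, seq_len). All codebooks share the same temporal positions,
# so the attention mask can be derived from codebook 0 alone.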
if attention_mask is None:
attention_mask = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])[:, 0, :]
attention_mask = attention_mask.ne(config.pad_token_id)
if encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_attention_mask = torch.ones(encoder_hidden_states.shape[:2], device=torch_device)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
class MusicgenDecoderTester:
def __init__(
self,
parent,
batch_size=4, # need batch_size != num_hidden_layers
seq_length=7,
is_training=True,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
pad_token_id=99,
bos_token_id=99,
num_codebooks=4,
audio_channels=1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.num_codebooks = num_codebooks
self.audio_channels = audio_channels
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size)
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
config = self.get_config()
inputs_dict = prepare_musicgen_decoder_inputs_dict(
config,
input_ids,
encoder_hidden_states=encoder_hidden_states,
)
return config, inputs_dict
def get_config(self):
config = MusicgenDecoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
d_ff=self.intermediate_size,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.bos_token_id,
bos_token_id=self.bos_token_id,
num_codebooks=self.num_codebooks,
tie_word_embeddings=False,
audio_channels=self.audio_channels,
)
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MusicgenModel, MusicgenForCausalLM) if is_torch_available() else ()
# Doesn't run generation tests. See `greedy_sample_model_classes` below
all_generative_model_classes = ()
greedy_sample_model_classes = (
(MusicgenForCausalLM,) if is_torch_available() else ()
) # we don't want to run all the generation tests, only a specific subset
pipeline_model_mapping = {}
test_pruning = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = MusicgenDecoderTester(self)
self.config_tester = ConfigTester(self, config_class=MusicgenDecoderConfig, hidden_size=16)
def test_config(self):
self.config_tester.run_common_tests()
# special case for labels
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_codebooks),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = MusicgenForCausalLM(config)
model.to(torch_device)
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)
model.train()
# Unlike the original method, we don't unfreeze frozen parameters.
# Indeed, sinusoidal position embeddings have frozen weights that should stay frozen.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
inputs = self._prepare_for_class(inputs_dict, MusicgenForCausalLM, return_labels=True)
loss = model(**inputs).loss
loss.backward()
optimizer.step()
for k, v in model.named_parameters():
if v.requires_grad:
self.assertTrue(v.grad is not None, f"{k} in {MusicgenForCausalLM.__name__} has no gradient!")
# override since we have to compute the input embeddings over codebooks
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs["input_ids"]
del inputs["input_ids"]
embed_tokens = model.get_input_embeddings()
input_ids = input_ids.reshape(-1, config.num_codebooks, input_ids.shape[-1])
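# Each codebook has its own embedding table; the decoder's input embedding is the sum of the
# per-codebook embeddings.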
inputs["inputs_embeds"] = sum(
[embed_tokens[codebook](input_ids[:, codebook]) for codebook in range(config.num_codebooks)]
)
with torch.no_grad():
model(**inputs)[0]
# override since we have embeddings / LM heads over multiple codebooks
def test_model_get_set_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first_embed = model.get_input_embeddings()[0]
self.assertIsInstance(first_embed, torch.nn.Embedding)
lm_heads = model.get_output_embeddings()
self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear))
@unittest.skip(reason="MusicGen does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="MusicGen does not support all arguments tested")
def test_model_outputs_equivalence(self):
pass
@unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied")
def test_tie_model_weights(self):
pass
@unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied")
def test_tied_weights_keys(self):
pass
def _get_logits_processor_kwargs(self, do_sample=False, config=None):
logits_processor_kwargs = {}
return logits_processor_kwargs
def test_greedy_generate_stereo_outputs(self):
original_audio_channels = self.model_tester.audio_channels
self.model_tester.audio_channels = 2
super().test_greedy_generate_dict_outputs()
self.model_tester.audio_channels = original_audio_channels
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
# Copied from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence
def test_flash_attn_2_inference_equivalence(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
# Ignore copy
dummy_input = inputs_dict[model.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
# Ignore copy
dummy_attention_mask[:, 1:] = 1
dummy_attention_mask[:, :1] = 0
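# i.e. left-padding: only the first position is masked out, which exercises FA2's padding handling.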
# Ignore copy
outputs = model(dummy_input, output_hidden_states=True)
# Ignore copy
outputs_fa = model_fa(dummy_input, output_hidden_states=True)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
# Ignore copy
other_inputs = {
"output_hidden_states": True,
}
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
outputs = model(dummy_input, **other_inputs)
outputs_fa = model_fa(dummy_input, **other_inputs)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)
# check with inference + dropout
model.train()
_ = model_fa(dummy_input, **other_inputs)
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
# Copied from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence_right_padding
def test_flash_attn_2_inference_equivalence_right_padding(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
# Ignore copy
dummy_input = inputs_dict[model.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
# Ignore copy
dummy_attention_mask[:, :-1] = 1
dummy_attention_mask[:, -1:] = 0
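# i.e. right-padding: only the last position is masked out.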
if model.config.is_encoder_decoder:
decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)
outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
else:
outputs = model(dummy_input, output_hidden_states=True)
outputs_fa = model_fa(dummy_input, output_hidden_states=True)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
# Ignore copy
other_inputs = {
"output_hidden_states": True,
}
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
outputs = model(dummy_input, **other_inputs)
outputs_fa = model_fa(dummy_input, **other_inputs)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)
@unittest.skip(
reason=(
"MusicGen has a custom set of generation tests that rely on `GenerationTesterMixin`, controlled by "
"`greedy_sample_model_classes`"
)
)
def test_generation_tester_mixin_inheritance(self):
pass
def prepare_musicgen_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
labels=None,
):
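# As in prepare_musicgen_decoder_inputs_dict above: decoder_input_ids are flattened to
# (batch * num_codebooks, seq_len), and the decoder attention mask is derived from codebook 0.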
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.reshape(
-1, config.decoder.num_codebooks, decoder_input_ids.shape[-1]
)[:, 0, :]
decoder_attention_mask = decoder_attention_mask.ne(config.decoder.pad_token_id)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"labels": labels,
}
class MusicgenTester:
def __init__(
self,
parent,
batch_size=4, # need batch_size != num_hidden_layers
seq_length=7,
is_training=True,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
pad_token_id=99,
bos_token_id=99,
num_codebooks=4,
num_filters=4,
codebook_size=128,
audio_channels=1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.num_codebooks = num_codebooks
self.num_filters = num_filters
self.codebook_size = codebook_size
self.audio_channels = audio_channels
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size * self.num_codebooks, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_musicgen_inputs_dict(config, input_ids, decoder_input_ids=decoder_input_ids)
return config, inputs_dict
def get_config(self):
text_encoder_config = T5Config(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.intermediate_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
)
audio_encoder_config = EncodecConfig(
hidden_size=self.vocab_size,
compress=1,
num_filters=self.num_filters,
codebook_size=self.codebook_size,
codebook_dim=self.vocab_size,
)
decoder_config = MusicgenDecoderConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.bos_token_id,
bos_token_id=self.bos_token_id,
num_codebooks=self.num_codebooks,
tie_word_embeddings=False,
audio_channels=self.audio_channels,
)
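# The composite config wires together a T5 text encoder, an EnCodec audio encoder, and the
# MusicGen decoder.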
config = MusicgenConfig.from_sub_models_config(text_encoder_config, audio_encoder_config, decoder_config)
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else ()
# Doesn't run generation tests. See `greedy_sample_model_classes` below
all_generative_model_classes = ()
greedy_sample_model_classes = (MusicgenForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"text-to-audio": MusicgenForConditionalGeneration} if is_torch_available() else {}
# Additional keys required for forward. MusicGen isn't flagged as encoder-decoder in its config, so decoder ids must be passed explicitly
additional_model_inputs = ["decoder_input_ids"]
test_pruning = False # training is not supported yet for MusicGen
test_headmasking = False
test_resize_embeddings = False
# not to test torchscript as the model tester doesn't prepare `input_values` and `padding_mask`
# (and `torchscript` hates `None` values).
test_torchscript = False
_is_composite = True
def setUp(self):
self.model_tester = MusicgenTester(self)
# special case for labels
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_codebooks),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)
model.train()
# The audio encoder weights are not used during the forward pass (only during the generate pass)
# So we need to freeze it to be able to train.
model.freeze_audio_encoder()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
optimizer.step()
for k, v in model.named_parameters():
if v.requires_grad:
self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!")
def _check_output_with_attentions(self, outputs, config, input_ids, decoder_input_ids):
text_encoder_config = config.text_encoder
decoder_config = config.decoder
encoder_attentions = outputs["encoder_attentions"]
self.assertEqual(len(encoder_attentions), text_encoder_config.num_hidden_layers)
self.assertEqual(
encoder_attentions[0].shape[-3:],
(text_encoder_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
)
decoder_attentions = outputs["decoder_attentions"]
num_decoder_layers = decoder_config.num_hidden_layers
self.assertEqual(len(decoder_attentions), num_decoder_layers)
self.assertEqual(
decoder_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
)
cross_attentions = outputs["cross_attentions"]
self.assertEqual(len(cross_attentions), num_decoder_layers)
cross_attention_input_seq_len = decoder_input_ids.shape[-1]
self.assertEqual(
cross_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, cross_attention_input_seq_len, input_ids.shape[-1]),
)
def check_musicgen_model_output_attentions(
self,
model_class,
config,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
output_attentions=True,
**kwargs,
)
self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids)
def check_musicgen_model_output_attentions_from_config(
self,
model_class,
config,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
**kwargs,
):
# Similar to `check_musicgen_model_output_attentions`, but with `output_attentions` triggered from the
# config file. Contrary to most models, changing the top-level model config won't work -- the defaults are loaded
# from the inner models' configurations.
config.output_attentions = True # model config -> won't work
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
**kwargs,
)
self.assertTrue(
all(key not in outputs for key in ["encoder_attentions", "decoder_attentions", "cross_attentions"])
)
config.text_encoder.output_attentions = True # inner model config -> will work
config.audio_encoder.output_attentions = True
config.decoder.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
**kwargs,
)
self._check_output_with_attentions(outputs, config, input_ids, decoder_input_ids)
# override since changing `output_attentions` from the top-level model config won't work
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# force eager attention to support output attentions
config._attn_implementation = "eager"
for model_class in self.all_model_classes:
self.check_musicgen_model_output_attentions(model_class, config, **inputs_dict)
self.check_musicgen_model_output_attentions_from_config(model_class, config, **inputs_dict)
# override since we have a specific forward signature for musicgen
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_ids",
"attention_mask",
"input_values",
"padding_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"]
if all(k in arg_names for k in ["head_mask", "decoder_head_mask", "cross_attn_head_mask"])
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
# override since changing `gradient_checkpointing` from the top-level model config won't work
def test_gradient_checkpointing_backward_compatibility(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if not model_class.supports_gradient_checkpointing:
continue
config.text_encoder.gradient_checkpointing = True
config.audio_encoder.gradient_checkpointing = True
config.decoder.gradient_checkpointing = True
model = model_class(config)
self.assertTrue(model.is_gradient_checkpointing)
@unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
def test_tie_model_weights(self):
pass
@unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied")
def test_tied_weights_keys(self):
pass
# override since changing `output_hidden_states` / `output_attentions` from the top-level model config won't work
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.text_encoder.output_hidden_states = True
config.audio_encoder.output_hidden_states = True
config.decoder.output_hidden_states = True
config.text_encoder.output_attentions = True
config.decoder.output_attentions = True
# force eager attention to support output attentions
config._attn_implementation = "eager"
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
decoder_hidden_states = outputs.decoder_hidden_states[0]
decoder_hidden_states.retain_grad()
if self.has_attentions:
encoder_attentions = outputs.encoder_attentions[0]
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(decoder_hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
# override since changing `output_hidden_states` from the top-level model config won't work
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.text_encoder.output_hidden_states = True
config.audio_encoder.output_hidden_states = True
config.decoder.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# override since the conv layers and lstm's in encodec are exceptions
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
uniform_init_parms = ["conv"]
ignore_init = ["lstm"]
if param.requires_grad:
if any(x in name for x in uniform_init_parms):
self.assertTrue(
-1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
elif not any(x in name for x in ignore_init):
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# override since we have embeddings / LM heads over multiple codebooks
def test_model_get_set_embeddings(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), torch.nn.Embedding)
lm_heads = model.get_output_embeddings()
self.assertTrue(lm_heads is None or isinstance(lm_heads[0], torch.nn.Linear))
def _get_logits_processor_kwargs(self, do_sample=False, config=None):
logits_processor_kwargs = {}
return logits_processor_kwargs
@require_torch_fp16
@require_torch_accelerator # not all operations are supported in fp16 on CPU
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.greedy_sample_model_classes:
model = model_class(config).eval().to(torch_device)
model.half()
# greedy
model.generate(input_dict["input_ids"], attention_mask=input_dict["attention_mask"], max_new_tokens=10)
# sampling
model.generate(
input_dict["input_ids"], attention_mask=input_dict["attention_mask"], do_sample=True, max_new_tokens=10
)
def test_greedy_generate_stereo_outputs(self):
original_audio_channels = self.model_tester.audio_channels
self.model_tester.audio_channels = 2
super().test_greedy_generate_dict_outputs()
self.model_tester.audio_channels = original_audio_channels
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
# Adapted from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence
def test_flash_attn_2_inference_equivalence(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation={"decoder": "flash_attention_2", "audio_encoder": None, "text_encoder": None},
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
# Ignore copy
dummy_input = inputs_dict[model.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
# Ignore copy
dummy_attention_mask[:, 1:] = 1
dummy_attention_mask[:, :1] = 0
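                # the two assignments above emulate left-padding by masking out the first position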
# Ignore copy
decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)
# Ignore copy
outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
# Ignore copy
outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
# Ignore copy
other_inputs = {
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": dummy_attention_mask,
"output_hidden_states": True,
}
# Ignore copy
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
# Ignore copy
outputs = model(dummy_input, **other_inputs)
# Ignore copy
outputs_fa = model_fa(dummy_input, **other_inputs)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2)
# check with inference + dropout
model.train()
_ = model_fa(dummy_input, **other_inputs)
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
def test_flash_attn_2_conversion(self):
self.skipTest(reason="Musicgen doesn't use the MusicgenFlashAttention2 class method.")
@require_torch_accelerator
@slow
def test_sdpa_can_dispatch_on_flash(self):
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
device_type, major, _ = get_device_properties()
if device_type == "cuda" and major < 8:
self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0")
elif device_type == "rocm" and major < 9:
self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0")
elif device_type not in ["cuda", "rocm", "xpu"]:
self.skipTest(reason="This test requires a Nvidia or AMD GPU or an Intel XPU")
torch.compiler.reset()
for model_class in self.all_model_classes:
if not model_class._supports_sdpa:
self.skipTest(f"{model_class.__name__} does not support SDPA")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
if config.model_type in ["llava", "llava_next", "vipllava", "video_llava"]:
self.skipTest(
reason="Llava-like models currently (transformers==4.39.1) requires an attention_mask input"
)
if config.model_type in ["paligemma"]:
self.skipTest(
"PaliGemma-like models currently (transformers==4.41.0) requires an attention_mask input"
)
if config.model_type in ["idefics", "idefics2", "idefics3"]:
self.skipTest(reason="Idefics currently (transformers==4.39.1) requires an image_attention_mask input")
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model = model_class.from_pretrained(
tmpdirname,
dtype=torch.float16,
attn_implementation={"decoder": "sdpa", "audio_encoder": None, "text_encoder": None},
)
model.to(torch_device)
inputs_dict.pop("attention_mask", None)
inputs_dict.pop("decoder_attention_mask", None)
for name, inp in inputs_dict.items():
if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]:
inputs_dict[name] = inp.to(torch.float16)
with sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
_ = model(**inputs_dict)
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
# Adapted from tests.test_modeling_common.ModelTesterMixin.test_flash_attn_2_inference_equivalence_right_padding
def test_flash_attn_2_inference_equivalence_right_padding(self):
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation={"decoder": "flash_attention_2", "audio_encoder": None, "text_encoder": None},
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
# Ignore copy
dummy_input = inputs_dict[model.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
# Ignore copy
dummy_attention_mask[:, :-1] = 1
dummy_attention_mask[:, -1:] = 0
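                # the two assignments above emulate right-padding by masking out the last position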
# Ignore copy
decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)
# Ignore copy
outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
# Ignore copy
outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
# Ignore copy
other_inputs = {
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": dummy_attention_mask,
"output_hidden_states": True,
}
# Ignore copy
if dummy_attention_mask is not None:
other_inputs["attention_mask"] = dummy_attention_mask
# Ignore copy
outputs = model(dummy_input, **other_inputs)
# Ignore copy
outputs_fa = model_fa(dummy_input, **other_inputs)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2)
def test_sdpa_can_dispatch_composite_models(self):
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
audio_encoder_attn = "sdpa" if model.audio_encoder._supports_sdpa else "eager"
text_encoder_attn = "sdpa" if model.text_encoder._supports_sdpa else "eager"
decoder_attn = "sdpa" if model.decoder._supports_sdpa else "eager"
                # the requested implementation ("sdpa" by default) is propagated to each sub-config;
                # each sub-model falls back to "eager" if it does not support SDPA (checked below)
self.assertTrue(model_sdpa.audio_encoder.config._attn_implementation == audio_encoder_attn)
self.assertTrue(model_sdpa.text_encoder.config._attn_implementation == text_encoder_attn)
self.assertTrue(model_sdpa.decoder.config._attn_implementation == decoder_attn)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.audio_encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.text_encoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.decoder.config._attn_implementation == "eager")
self.assertTrue(model_eager.config._attn_implementation == "eager")
def test_requires_grad_with_frozen_encoders(self):
config = self.model_tester.get_config()
for model_class in self.all_model_classes:
model = model_class(config)
model.freeze_audio_encoder()
audio_encoder_grads = [param.requires_grad for param in model.audio_encoder.parameters()]
text_encoder_grads = [param.requires_grad for param in model.text_encoder.parameters()]
self.assertFalse(all(audio_encoder_grads))
self.assertTrue(all(text_encoder_grads))
model = model_class(config)
model.freeze_text_encoder()
audio_encoder_grads = [param.requires_grad for param in model.audio_encoder.parameters()]
text_encoder_grads = [param.requires_grad for param in model.text_encoder.parameters()]
self.assertTrue(all(audio_encoder_grads))
self.assertFalse(all(text_encoder_grads))
@unittest.skip(
reason=(
"MusicGen has a custom set of generation tests that rely on `GenerationTesterMixin`, controlled by "
"`greedy_sample_model_classes`"
)
)
def test_generation_tester_mixin_inheritance(self):
pass
@unittest.skip(reason=("MusicGen has a set of composite models which might not have SDPA themselves, e.g. T5."))
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
def get_bip_bip(bip_duration=0.125, duration=0.5, sample_rate=32000):
    """Produces a series of 'bip bip' sounds: a 440 Hz tone gated on and off with period 2 * bip_duration."""
    timesteps = np.arange(int(duration * sample_rate)) / sample_rate
    # 440 Hz carrier tone
    wav = np.cos(2 * math.pi * 440 * timesteps)
    # position within the current on/off cycle, normalized to [0, 1)
    time_period = (timesteps % (2 * bip_duration)) / (2 * bip_duration)
    # square envelope: silent for the first half of each cycle, audible for the second half
    envelope = time_period >= 0.5
    return wav * envelope
def place_dict_on_device(dict_to_place, device):
for key in dict_to_place:
if dict_to_place[key] is not None and isinstance(dict_to_place[key], torch.Tensor):
dict_to_place[key] = dict_to_place[key].to(device)
return dict_to_place
@require_torch
class MusicgenIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to(torch_device)
@cached_property
def processor(self):
return MusicgenProcessor.from_pretrained("facebook/musicgen-small")
@slow
def test_logits_text_prompt(self):
model = self.model
processor = self.processor
inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt")
# prepare the encoder inputs
input_ids = inputs.input_ids.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
# prepare the decoder inputs
pad_token_id = model.generation_config.pad_token_id
decoder_input_ids = (
torch.ones((input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long).to(torch_device)
* pad_token_id
)
with torch.no_grad():
logits = model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
).logits
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
-0.9708, -3.0149, -4.6415, -1.4754, -0.2786, -2.3523, -2.6049, -6.7467,
-1.0206, -3.2984, -3.3968, -1.5108, -1.5786, -3.1493, -1.1503, -0.0545,
]
)
# fmt: on
self.assertTrue(logits.shape == (*decoder_input_ids.shape, model.decoder.config.vocab_size))
torch.testing.assert_close(logits[0, 0, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_logits_text_audio_prompt(self):
model = self.model
processor = self.processor
audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)]
text = ["80s music", "Club techno"]
inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt")
# prepare the text encoder inputs
input_ids = inputs.input_ids.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
# prepare the audio encoder inputs
input_values = inputs.input_values.to(torch_device)
padding_mask = inputs.padding_mask.to(torch_device)
with torch.no_grad():
logits = model(
input_ids,
attention_mask=attention_mask,
input_values=input_values,
padding_mask=padding_mask,
).logits
# fmt: off
EXPECTED_LOGITS = torch.tensor(
[
0.1841, -2.9324, -0.7898, 0.1857, 0.4971, -2.8685, -1.6525, -1.6541,
2.7757, -2.5942, -3.0959, -1.0120, -1.0147, -0.4605, -0.8885, 0.6820,
]
)
# fmt: on
self.assertTrue(logits.shape == (8, 50, 2048))
torch.testing.assert_close(logits[0, -1, :16].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_greedy(self):
model = self.model
# only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same
unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device)
output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=5)
# fmt: off
EXPECTED_VALUES = torch.tensor(
[
0.0056, 0.0064, 0.0063, 0.0054, 0.0042, 0.0033, 0.0024, 0.0015,
0.0015, 0.0010, 0.0004, -0.0012, -0.0036, -0.0055, -0.0067, -0.0071,
]
)
# fmt: on
self.assertTrue(output_values.shape == (1, 1, 3200))
torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_unconditional_sampling(self):
model = self.model
# for stochastic sampling we can generate multiple outputs
unconditional_inputs = model.get_unconditional_inputs(num_samples=2)
unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device)
set_seed(0)
output_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=10)
# fmt: off
expectations = Expectations(
{
(None, None): [-0.0099, -0.0140, 0.0079, 0.0080, -0.0046, 0.0065, -0.0068, -0.0185, 0.0105, 0.0059, 0.0329, 0.0249, -0.0204, -0.0341, -0.0465, 0.0053],
("cuda", 8): [-0.0099, -0.0140, 0.0079, 0.0080, -0.0046, 0.0065, -0.0068, -0.0185, 0.0105, 0.0058, 0.0328, 0.0249, -0.0205, -0.0342, -0.0466, 0.0052],
}
)
EXPECTED_VALUES = torch.tensor(expectations.get_expectation()).to(torch_device)
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
torch.testing.assert_close(output_values[0, 0, :16], EXPECTED_VALUES, rtol=2e-4, atol=2e-4)
@slow
def test_generate_text_prompt_greedy(self):
model = self.model
processor = self.processor
inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt")
# prepare the encoder inputs
input_ids = inputs.input_ids.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
output_values = model.generate(
input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=None, max_new_tokens=10
)
# fmt: off
EXPECTED_VALUES = torch.tensor(
[
-1.1998e-04, -2.2302e-04, 4.6296e-04, 1.0524e-03, 2.4827e-04,
-4.0288e-05, -1.2468e-04, 4.9846e-05, 7.1485e-04, 4.4197e-04,
]
)
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
torch.testing.assert_close(output_values[0, 0, :10].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_greedy_with_classifier_free_guidance(self):
model = self.model
processor = self.processor
inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt")
# prepare the encoder inputs
input_ids = inputs.input_ids.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
output_values = model.generate(
input_ids, attention_mask=attention_mask, do_sample=False, guidance_scale=3, max_new_tokens=10
)
# fmt: off
EXPECTED_VALUES = torch.tensor(
[
0.0283, 0.0246, 0.0650, 0.0640, 0.0599, 0.0711, 0.0420, 0.0112,
0.0511, 0.0746, 0.1363, 0.1213, 0.0185, -0.0578, -0.0908, 0.0443,
]
)
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_prompt_sampling(self):
model = self.model
processor = self.processor
inputs = processor(text=["80s music", "Club techno"], padding=True, return_tensors="pt")
# prepare the encoder inputs
input_ids = inputs.input_ids.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
set_seed(0)
output_values = model.generate(
input_ids, attention_mask=attention_mask, do_sample=True, guidance_scale=None, max_new_tokens=10
)
# fmt: off
expectations = Expectations(
{
(None, None): [-0.0111, -0.0154, 0.0047, 0.0058, -0.0068, 0.0012, -0.0109, -0.0229, 0.0010, -0.0038, 0.0167, 0.0042, -0.0421, -0.0610, -0.0764, -0.0326],
("cuda", 8): [-0.0110, -0.0153, 0.0048, 0.0058, -0.0068, 0.0012, -0.0109, -0.0229, 0.0010, -0.0037, 0.0168, 0.0042, -0.0420, -0.0609, -0.0763, -0.0326],
}
)
EXPECTED_VALUES = torch.tensor(expectations.get_expectation()).to(torch_device)
# fmt: on
self.assertTrue(output_values.shape == (2, 1, 4480))
torch.testing.assert_close(output_values[0, 0, :16], EXPECTED_VALUES, rtol=2e-4, atol=2e-4)
@slow
def test_generate_text_audio_prompt(self):
model = self.model
processor = self.processor
audio = [get_bip_bip(duration=0.5), get_bip_bip(duration=1.0)]
text = ["80s music", "Club techno"]
inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt")
inputs = place_dict_on_device(inputs, device=torch_device)
output_values = model.generate(**inputs, do_sample=False, guidance_scale=None, max_new_tokens=10)
# fmt: off
EXPECTED_VALUES = torch.tensor(
[
-0.0036, -0.0130, -0.0261, -0.0384, -0.0557, -0.0718, -0.0680, -0.0632,
-0.0529, -0.0403, -0.0289, -0.0198, -0.0136, -0.0101, -0.0095, -0.0040,
]
)
# fmt: on
self.assertTrue(
output_values.shape == (2, 1, 36480)
        )  # the audio prompt spans 32000 input values (1 s at 32 kHz) and we generate from there
torch.testing.assert_close(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES, rtol=2e-4, atol=2e-4)
@require_torch
class MusicgenStereoIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-stereo-small").to(torch_device)
@cached_property
def processor(self):
return MusicgenProcessor.from_pretrained("facebook/musicgen-stereo-small")
@slow
def test_generate_unconditional_greedy(self):
model = self.model
# only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same
unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device)
output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12)
# fmt: off
EXPECTED_VALUES_LEFT = torch.tensor(
[
0.0017, 0.0004, 0.0004, 0.0005, 0.0002, 0.0002, -0.0002, -0.0013,
-0.0010, -0.0015, -0.0018, -0.0032, -0.0060, -0.0082, -0.0096, -0.0099,
]
)
EXPECTED_VALUES_RIGHT = torch.tensor(
[
0.0038, 0.0028, 0.0031, 0.0032, 0.0031, 0.0032, 0.0030, 0.0019,
0.0021, 0.0015, 0.0009, -0.0008, -0.0040, -0.0067, -0.0087, -0.0096,
]
)
# fmt: on
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (1, 2, 5760))
torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, rtol=1e-4, atol=1e-4)
@slow
def test_generate_text_audio_prompt(self):
model = self.model
processor = self.processor
# create stereo inputs
audio = [get_bip_bip(duration=0.5)[None, :].repeat(2, 0), get_bip_bip(duration=1.0)[None, :].repeat(2, 0)]
text = ["80s music", "Club techno"]
inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt")
inputs = place_dict_on_device(inputs, device=torch_device)
output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12)
# fmt: off
EXPECTED_VALUES_LEFT = torch.tensor(
[
0.2535, 0.2008, 0.1471, 0.0896, 0.0306, -0.0200, -0.0501, -0.0728,
-0.0832, -0.0856, -0.0867, -0.0884, -0.0864, -0.0866, -0.0744, -0.0430,
]
)
EXPECTED_VALUES_RIGHT = torch.tensor(
[
0.1695, 0.1213, 0.0732, 0.0239, -0.0264, -0.0705, -0.0935, -0.1103,
-0.1163, -0.1139, -0.1104, -0.1082, -0.1027, -0.1004, -0.0900, -0.0614,
]
)
# fmt: on
# (bsz, channels, seq_len)
self.assertTrue(output_values.shape == (2, 2, 37760))
        # the audio prompt spans 32000 input values and we generate from there - we check only the last (generated) values
torch.testing.assert_close(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, rtol=2e-4, atol=2e-4)
torch.testing.assert_close(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, rtol=2e-4, atol=2e-4)
| transformers/tests/models/musicgen/test_modeling_musicgen.py/0 | {
"file_path": "transformers/tests/models/musicgen/test_modeling_musicgen.py",
"repo_id": "transformers",
"token_count": 31037
} | 563 |
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
if is_torchvision_available():
from transformers import OneFormerImageProcessorFast
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle, prepare_metadata
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
class OneFormerImageProcessorTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
size=None,
do_resize=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
num_labels=10,
do_reduce_labels=False,
ignore_index=255,
repo_path="shi-labs/oneformer_demo",
class_info_file="ade20k_panoptic.json",
num_text=10,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.class_info_file = class_info_file
self.num_text = num_text
self.repo_path = repo_path
# for the post_process_functions
self.batch_size = 2
self.num_queries = 10
self.num_classes = 10
self.height = 3
self.width = 4
self.num_labels = num_labels
self.do_reduce_labels = do_reduce_labels
self.ignore_index = ignore_index
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"num_text": self.num_text,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to OneFormerImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
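            # batched images are resized independently, then padded to the largest height/width in the batch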
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
)
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
fast_image_processing_class = OneFormerImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = OneFormerImageProcessorTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_proc_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "ignore_index"))
self.assertTrue(hasattr(image_processor, "class_info_file"))
self.assertTrue(hasattr(image_processor, "num_text"))
self.assertTrue(hasattr(image_processor, "repo_path"))
self.assertTrue(hasattr(image_processor, "metadata"))
self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def comm_get_image_processor_inputs(
self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np", image_processing_class=None
):
image_processor = image_processing_class(**self.image_processor_dict)
# prepare image and target
num_labels = self.image_processor_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
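                # instance ids 0 .. 2 * num_labels - 1 map to semantic id (instance_id % num_labels)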
annotations = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
inputs = image_processor(
image_inputs,
["semantic"] * len(image_inputs),
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
pad_and_return_pixel_mask=True,
)
return inputs
@unittest.skip
def test_init_without_params(self):
pass
def test_call_with_segmentation_maps(self):
def common(is_instance_map=False, segmentation_type=None):
for image_processing_class in self.image_processor_list:
inputs = self.comm_get_image_processor_inputs(
with_segmentation_maps=True,
is_instance_map=is_instance_map,
segmentation_type=segmentation_type,
image_processing_class=image_processing_class,
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
text_inputs = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
                    # this ensures padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(len(text_input), self.image_processor_tester.num_text)
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
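        # flattened row-major, rows 0 and 1 form a single run of ones starting at (1-indexed)
        # pixel 21 and spanning 45 pixels; together with the run in row 5 this yields 4 RLE values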
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
def test_post_process_semantic_segmentation(self):
for image_processing_class in self.image_processor_list:
            feature_extractor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
            segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.image_processor_tester.batch_size)
self.assertEqual(
segmentation[0].shape,
(
self.image_processor_tester.height,
self.image_processor_tester.width,
),
)
            target_sizes = [(1, 4) for _ in range(self.image_processor_tester.batch_size)]
            segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
                self.assertIsInstance(el["segments_info"], list)
self.assertEqual(
el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width)
)
segmentation_with_opts = image_processor.post_process_instance_segmentation(
outputs,
threshold=0,
target_sizes=[(1, 4) for _ in range(self.image_processor_tester.batch_size)],
task_type="panoptic",
)
self.assertTrue(len(segmentation_with_opts) == self.image_processor_tester.batch_size)
for el in segmentation_with_opts:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
                self.assertIsInstance(el["segments_info"], list)
self.assertEqual(el["segmentation"].shape, (1, 4))
def test_post_process_panoptic_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(
num_labels=self.image_processor_tester.num_classes,
max_seq_length=77,
task_seq_length=77,
class_info_file="ade20k_panoptic.json",
num_text=self.image_processor_tester.num_text,
repo_path="shi-labs/oneformer_demo",
)
outputs = self.image_processor_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
                self.assertIsInstance(el["segments_info"], list)
self.assertEqual(
el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width)
)
def test_can_load_with_local_metadata(self):
# Create a temporary json file
class_info = {
"0": {"isthing": 0, "name": "foo"},
"1": {"isthing": 0, "name": "bar"},
"2": {"isthing": 1, "name": "baz"},
}
metadata = prepare_metadata(class_info)
for image_processing_class in self.image_processor_list:
with tempfile.TemporaryDirectory() as tmpdirname:
metadata_path = os.path.join(tmpdirname, "metadata.json")
with open(metadata_path, "w") as f:
json.dump(class_info, f)
config_dict = self.image_processor_dict
config_dict["class_info_file"] = metadata_path
config_dict["repo_path"] = tmpdirname
image_processor = image_processing_class(**config_dict)
self.assertEqual(image_processor.metadata, metadata)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(image_encoding_slow.mask_labels, image_encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(
image_encoding_slow.class_labels, image_encoding_fast.class_labels
):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
self.assertEqual(image_encoding_slow.text_inputs, image_encoding_fast.text_inputs)
self.assertEqual(image_encoding_slow.task_inputs, image_encoding_fast.task_inputs)
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(
dummy_images,
segmentation_maps=dummy_maps,
task_inputs=["instance"] + ["semantic"] * (len(dummy_images) - 1),
return_tensors="pt",
)
encoding_fast = image_processor_fast(
dummy_images,
segmentation_maps=dummy_maps,
task_inputs=["instance"] + ["semantic"] * (len(dummy_images) - 1),
return_tensors="pt",
)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(encoding_slow.mask_labels, encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(encoding_slow.class_labels, encoding_fast.class_labels):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
self.assertEqual(encoding_slow.text_inputs, encoding_fast.text_inputs)
self.assertEqual(encoding_slow.task_inputs, encoding_fast.task_inputs)
| transformers/tests/models/oneformer/test_image_processing_oneformer.py/0 | {
"file_path": "transformers/tests/models/oneformer/test_image_processing_oneformer.py",
"repo_id": "transformers",
"token_count": 8875
} | 564 |
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Phi-3 model."""
import unittest
import pytest
from transformers import Phi3Config, StaticCache, is_torch_available
from transformers.models.auto.configuration_auto import AutoConfig
from transformers.testing_utils import (
Expectations,
require_torch,
slow,
torch_device,
)
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
if is_torch_available():
import torch
from transformers import (
AutoTokenizer,
Phi3ForCausalLM,
Phi3ForSequenceClassification,
Phi3ForTokenClassification,
Phi3Model,
)
from transformers.models.phi3.modeling_phi3 import Phi3RotaryEmbedding
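# stop token for the manual greedy decoding below (token id of "<|endoftext|>" in the Phi-3 tokenizer)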
end_of_text_token = 32000
class Phi3MiniWithStaticCache(torch.nn.Module):
def __init__(self, model: Phi3ForCausalLM, batch_size: int, max_seq_len: int):
super().__init__()
self.model = model
self.cache = StaticCache(config=model.config, max_cache_len=max_seq_len)
def forward(
self,
input_ids: torch.LongTensor = None,
) -> torch.FloatTensor:
return self.model.forward(
input_ids=input_ids,
use_cache=True,
return_dict=True,
past_key_values=self.cache,
).logits
@staticmethod
def generate(model: Phi3ForCausalLM, prompt_tokens: torch.LongTensor, max_seq_len: int) -> list[int]:
model = Phi3MiniWithStaticCache(model, 1, max_seq_len + prompt_tokens.shape[-1])
response_tokens = []
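        # prefill: feed the prompt one token at a time so the static cache is populated position by position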
for input_pos in range(prompt_tokens.shape[-1]):
result = model.forward(
input_ids=prompt_tokens[:, input_pos : input_pos + 1],
)
response_tokens.append(prompt_tokens[0][input_pos].item())
current_token = torch.argmax(result[:, -1, :], dim=-1).item()
response_tokens.append(current_token)
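        # greedy decoding: keep appending the argmax token until end-of-text or the length budget is reached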
while current_token != end_of_text_token and len(response_tokens) < max_seq_len:
result = model.forward(
input_ids=torch.tensor([[current_token]], dtype=torch.long),
)
current_token = torch.argmax(result[:, -1, :], dim=-1).item()
response_tokens.append(current_token)
return response_tokens
class Phi3ModelTester(CausalLMModelTester):
config_class = Phi3Config
if is_torch_available():
base_model_class = Phi3Model
causal_lm_class = Phi3ForCausalLM
sequence_class = Phi3ForSequenceClassification
token_class = Phi3ForTokenClassification
@require_torch
class Phi3ModelTest(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(Phi3Model, Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": Phi3Model,
"text-classification": Phi3ForSequenceClassification,
"token-classification": Phi3ForTokenClassification,
"text-generation": Phi3ForCausalLM,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
model_tester_class = Phi3ModelTester
rotary_embedding_layer = Phi3RotaryEmbedding
@slow
@require_torch
class Phi3IntegrationTest(unittest.TestCase):
def test_model_phi3_mini_4k_instruct_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor(
[
                [8.9005, 8.5380, 12.0361, 9.1562, 7.4068, 10.2581, 7.8991, 7.2447, 7.0626, 7.5760, 7.8315, 9.4076, 16.1104, 20.1290, 7.7500, 7.1947, 6.1550, 7.0563, 8.5344, 8.7248, 7.1359, 7.8237, 7.6817, 7.6395, 7.7924, 6.9702, 6.9097, 8.7074, 9.5768, 8.1145],
                [18.7090, 18.5701, 19.3660, 21.5171, 17.5042, 17.8716, 16.3554, 17.4617, 18.1623, 16.5641, 17.7547, 18.0193, 23.8355, 29.4481, 16.3864, 16.0560, 16.1543, 18.5507, 18.1343, 17.3883, 17.7422, 17.3012, 16.7657, 17.6874, 17.9067, 16.8301, 16.2719, 18.3709, 19.0318, 16.7315],
]
).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_4k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=32)
output_text = tokenizer.batch_decode(outputs)
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some ideas for incorporating these fruits into your"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_4k_instruct_with_static_cache(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64)
output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device))
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_model_phi3_mini_128k_instruct_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor(
[
[10.6076, 10.6499, 14.0601, 19.8499, 15.1787, 19.3717, 19.9782, 17.0394, 15.7875, 18.1403, 19.2748, 12.6627, 20.2804, 24.5362, 18.8105, 15.3394, 12.1219, 15.9941, 19.0679, 16.4936, 17.0505, 16.8738, 17.3090, 16.6572, 16.8754, 16.6912, 15.1627, 18.8721, 19.6017, 18.5513],
[16.2141, 18.7298, 17.4216, 21.9312, 17.7606, 17.6177, 16.7766, 17.9859, 18.4132, 17.4505, 18.6385, 18.5396, 23.6260, 28.7443, 16.1817, 15.5148, 16.0035, 18.6652, 18.3087, 17.2960, 17.8223, 17.7776, 16.8686, 17.4093, 17.8037, 17.2544, 16.7231, 18.6195, 19.6784, 16.6647],
]
).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_phi3_mini_128k_instruct_generation(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=32)
output_text = tokenizer.batch_decode(outputs)
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways. Here are some creative and healthy"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_128k_instruct_with_static_cache(self):
model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-128k-instruct")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-128k-instruct")
messages = [
{
"role": "system",
"content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.",
},
{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
response_tokens = Phi3MiniWithStaticCache.generate(model, inputs, 64)
output_text = tokenizer.batch_decode(torch.tensor([response_tokens], dtype=torch.long, device=torch_device))
EXPECTED_OUTPUT = [
"<|system|> You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.<|end|><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious and nutritious ways"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
def test_phi3_mini_4k_sliding_window(self):
"""
This tests that Phi3 doesn't deteriorate in quality for long context generations. Since Phi3 has
sliding window attention, the test is tailored so that (context + max_new_tokens > sliding_window).
See #33586 for more
"""
model = Phi3ForCausalLM.from_pretrained(
"microsoft/Phi-3-mini-4k-instruct", device_map=torch_device, dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
input_text = """
<|user|>
Tell me about Paris, France.<|end|>
<|assistant|>
Paris, the capital city of France, is renowned for its rich history, iconic landmarks, and vibrant culture. Known as "The City of Light," Paris is situated in the north-central part of the country along the Seine River.
Here are some key aspects of Paris:
1. Landmarks: Paris is home to numerous famous landmarks, including the Eiffel Tower, the Louvre Museum, Notre-Dame Cathedral, and the Champs-Élysées. The Eiffel Tower, built in 1889, is an iconic symbol of Paris and attracts millions of tourists each year. The Louvre Museum, the world's largest art museum, houses thousands of works of art, including the Mona Lisa and the Venus de Milo.
2. History: Paris has a rich history dating back to the 3rd century BC, when it was founded by a Celtic tribe called the Parisii. Over the centuries, the city has been influenced by various cultures, including the Romans, the Franks, and the Normans. The French Revolution in the late 18th century marked a significant turning point in Paris's history, leading to the establishment of the modern French Republic.
3. Culture: Paris is a global center for art, fashion, gastronomy, and culture. The city is home to numerous museums, including the Centre Pompidou, Musée d'Orsay, and Musée Rodin. Paris is also known for its fashion industry, with many famous designers having their origins in the city. The city's cuisine is also highly regarded, with a focus on fresh ingredients, and a wide variety of dishes, including French classics like coq au vin, boeuf bourguignon, and crêpes.
4. Architecture: Parisian architecture is characterized by its diverse styles, ranging from Gothic and Romanesque to Art Nouveau and Art Deco. The city's famous Haussmannian buildings, designed by Baron Haussmann in the mid-19th century, are known for their uniform facades, wrought-iron balconies, and large windows.
5. Transportation: Paris has an extensive public transportation system, including the Paris Métro, RER (suburban trains), and buses. The city's iconic yellow taxis are also a popular mode of transportation.
6. Language: The official language of Paris is French, and the city's residents are known for their charm and politeness.
7. Festivals and Events: Paris hosts numerous festivals and events throughout the year, including the annual Bastille Day celebrations, the Paris Fashion Week, and the famous annual New Year's Eve fireworks on the Eiffel Tower.
8. Geography: Paris is located in the north-central part of France, with the Seine River running through the city. The city's geography is characterized by rolling hills and picturesque parks, such as the Bois de Boulogne and the Jardin des Tuileries.
9. Population: As of 2021, Paris has an estimated population of around 2.2 million residents, with the metropolitan area housing over 12 million people.
In summary, Paris is a city steeped in history, culture, and art, with a unique blend of architectural styles and a vibrant atmosphere that continues to captivate millions of visitors each year.<|end|>
<|user|>
Please give me a list of 5 architectural landmarks in Paris, France.<|end|>
<|assistant|>
1. Eiffel Tower: Designed by Gustave Eiffel and completed in 1889, the Eiffel Tower is an iconic symbol of Paris and France. Standing at 324 meters tall, it was the tallest man-made structure in the world until the completion of the Chrysler Building in New York in 1930. The Eiffel Tower is made of wrought iron and offers visitors stunning views of the city from its three levels.
2. Notre-Dame Cathedral: Located on the Île de la Cité, Notre-Dame Cathedral is a masterpiece of French Gothic architecture. Construction began in the 12th century and continued for over 200 years, with the cathedral's completion in the 14th century. The cathedral is famous for its intricate facade, stained-glass windows, and the iconic gargoyles and chimeras.
3. Louvre Museum: Originally built as a fortress in the 12th century, the Louvre Museum is now the world's largest art museum and a historic monument in Paris. The museum's most famous landmark is the iconic glass pyramid entrance, designed by architect I. M. Pei in the 1980s. The Louvre houses over 380,000 works of art, including the Mona Lisa and the Venus de Milo.
4. Sacré-Cœur Basilica: The Sacré-Cœur Basilica, also known as the Basilique du Sacré-Cœur, is a Roman Catholic church and minor basilica located at the summit of the butte Montmartre, the highest point in Paris. The basilica was designed by Paul Abadie and dedicated in 1914. Its white domes and lavender-colored travertine stone make it a distinctive landmark in the Paris skyline.
5. Arc de Triomphe: The Arc de Triomphe is a monumental structure located at the western end of the Champs-Élysées. Commissioned by Napoleon in 1806, the Arc was designed by Jean-François-Thérèse Chalgrin and completed in 1836. The monument honors those who fought and died for France during the French Revolutionary and Napoleonic Wars. The Arc features sculptural reliefs and inscriptions, and its façade is adorned with the names of 357 generals and 660 soldiers.
These five architectural landmarks showcase the diverse styles and historical periods of Paris, from Gothic to Neoclassical, and from the 19th to the 20th centuries. Each landmark has its unique features and contributes to the city's rich architectural heritage.<|end|>
<|user|>
Please give me a list of 10 famous items displayed in the Louvre Museum. Thanks!<|end|>
<|assistant|>
1. Mona Lisa: The Mona Lisa, painted by Leonardo da Vinci in the early 16th century, is arguably the most famous painting in the world. The portrait is known for its enigmatic smile and masterful use of sfumato, a technique that creates a soft, hazy effect.
2. Venus de Milo: This ancient Greek statue, believed to have been created around 130-100 BC, is a masterpiece of Hellenistic sculpture. The Venus de Milo is renowned for its graceful beauty and the mystery surrounding its missing arms.
3. Winged Victory of Samothrace: This Hellenistic sculpture, dating back to the 2nd century BC, depicts the Greek goddess Nike, the personification of victory. The sculpture is celebrated for its dynamic movement and intricate details.
4. Liberty Leading the People: This iconic painting by Eugène Delacroix, created in 1830, commemorates the July Revolution in France. The artwork depicts a woman personifying Liberty leading a group of revolutionaries over the bodies of the fallen.
5. The Wedding at Cana: A 1516 painting by Veronese, The Wedding at Cana is a large-scale work that depicts the biblical story of Jesus turning water into wine at a wedding feast. The painting is known for its vibrant colors and intricate details.
6. The Raft of the Medusa: This 1819 painting by Théodore Géricault is a powerful depiction of the aftermath of the shipwreck of the French frigate Méduse. The painting is famous for its dramatic composition and emotional intensity.
7. The Coronation of Napoleon: This 1805 painting by Jacques-Louis David portrays the coronation of Napoleon Bonaparte as Emperor of the French. The artwork is a masterpiece of neoclassical style and captures the pomp and ceremony of the event.
8. The Death of Socrates: A 1914 painting by Pablo Picasso, The Death of Socrates depicts the moment when the ancient Greek philosopher Socrates drinks the poison hemlock, as ordered by the Athenian government. The painting is a powerful expression of the existential themes associated with Socrates' death.
9. The Turkish Bath: A 1887 painting by Puvis de Chavannes, The Turkish Bath is a large-scale work that depicts a scene of relaxation and leisity in a traditional Turkish bath. The painting is known for its subdued color palette and serene atmosphere.
10. The Wedding at Cana: A 1901 painting by Giovanni Boldini, The Wedding at Cana is a lively depiction of a festive Italian wedding. The painting is characterized by Boldini's signature flamboyant style and his ability to capture the essence of the subjects with a sense of movement and energy.
These 10 famous items in the Louvre Museum showcase a diverse range of artistic styles and periods, from ancient Greek and Roman sculptures to Renaissance and 19th-century paintings. The collection at the Louvre is one of the world's largest and most important museums, housing over 800,000 works of art, and these famous items are just a small selection of the museum's vast collection.<|end|>
<|user|>
Please give me a list of 10 famous French dishes.<|end|>
<|assistant|>
"""
inputs = tokenizer(input_text, return_tensors="pt").to(device=torch_device)
outputs = model.generate(**inputs, max_new_tokens=100)
output_text = tokenizer.batch_decode(outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True)
EXPECTED_OUTPUT = [
'1. Coq au Vin: Coq au Vin is a classic French dish that translates to "rooster in wine." The dish consists of chicken braised with wine, lardons, mushrooms, and garlic. It is a hearty and flavorful dish that is often served with potatoes or rice.\n\n 2. Boeuf Bourguignon: Boeuf Bourguignon is a traditional French beef stew that'
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
@pytest.mark.torch_export_test
@slow
def test_export_static_cache(self):
from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
if not is_torch_greater_or_equal_than_2_4:
self.skipTest(reason="This test requires torch >= 2.4 to run.")
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
model_id = "microsoft/Phi-4-mini-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="</s>", padding_side="right")
expected_text_completions = Expectations(
{
("rocm", (9, 5)): ["You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user. A 45-year-old patient with a 10-year history of type 2 diabetes mellitus presents with a 2-year history of progressive, non-healing, and painful, 2.5 cm"],
("cuda", None): ["You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user. A 45-year-old patient with a 10-year history of type 2 diabetes mellitus, who is currently on metformin and a SGLT2 inhibitor, presents with a 2-year history"],
}
) # fmt: skip
EXPECTED_TEXT_COMPLETION = expected_text_completions.get_expectation()
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load config
config = AutoConfig.from_pretrained(model_id)
# NOTE: To make the model exportable we need to set the rope scaling to default to avoid hitting
# the data-dependent control flow in _longrope_frequency_update. Alternatively, we can rewrite
# that function to avoid the data-dependent control flow.
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
config.rope_scaling["type"] = "default"
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = AutoModelForCausalLM.from_pretrained(
model_id,
config=config,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompt = [
"You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."
]
prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
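# Hedged follow-up sketch (not part of the original test): one way to cross-check the
# exported program against eager generation, assuming greedy decoding is deterministic
# for this model; `generate` and `batch_decode` are the standard transformers APIs:
#   eager_ids = model.generate(prompt_token_ids, max_new_tokens=max_new_tokens, do_sample=False)
#   eager_text = tokenizer.batch_decode(eager_ids, skip_special_tokens=True)
#   assert eager_text == ep_generated_text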
| transformers/tests/models/phi3/test_modeling_phi3.py/0 | {
"file_path": "transformers/tests/models/phi3/test_modeling_phi3.py",
"repo_id": "transformers",
"token_count": 9656
} | 565 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import torch
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import PixtralProcessor
@require_vision
class PixtralProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = PixtralProcessor
@classmethod
def setUpClass(cls):
cls.url_0 = "https://www.ilankelman.org/stopsigns/australia.jpg"
cls.image_0 = np.random.randint(255, size=(3, 876, 1300), dtype=np.uint8)
cls.url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
cls.image_1 = np.random.randint(255, size=(3, 480, 640), dtype=np.uint8)
cls.image_2 = np.random.randint(255, size=(3, 1024, 1024), dtype=np.uint8)
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
processor = PixtralProcessor.from_pretrained("mistral-community/pixtral-12b")
processor.save_pretrained(self.tmpdirname)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_image_token_filling(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
# Important to check with a non-square image
image = torch.randint(0, 2, (3, 500, 316))
expected_image_tokens = 640
image_token_index = 10
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
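# Hedged helper sketch (not part of the original test): assuming the default
# pixtral-12b processor settings (longest_edge=1024, patch_size=16), the 500x316
# image above is not downscaled, so the number of [IMG] placeholders is the
# patch-grid area: ceil(500 / 16) * ceil(316 / 16) = 32 * 20 = 640. The
# [IMG_BREAK]/[IMG_END] markers use different token ids and are not counted.
@staticmethod
def _expected_image_token_count(height, width, patch_size=16):
    import math

    return math.ceil(height / patch_size) * math.ceil(width / patch_size)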
def test_processor_with_single_image(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
prompt_string = "USER: [IMG]\nWhat's the content of the image? ASSISTANT:"
# Make small for checking image token expansion
processor.image_processor.size = {"longest_edge": 30}
processor.image_processor.patch_size = {"height": 2, "width": 2}
# Test passing in an image
inputs_image = processor(text=prompt_string, images=self.image_0, return_tensors="pt")
self.assertIn("input_ids", inputs_image)
self.assertTrue(len(inputs_image["input_ids"]) == 1)
self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
# fmt: off
input_ids = inputs_image["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:"
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing in a url
inputs_url = processor(text=prompt_string, images=self.url_0, return_tensors="pt")
self.assertIn("input_ids", inputs_url)
self.assertTrue(len(inputs_url["input_ids"]) == 1)
self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
# fmt: off
input_ids = inputs_url["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to "USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the content of the image? ASSISTANT:"
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing inputs as a single list
inputs_image = processor(text=prompt_string, images=[self.image_0], return_tensors="pt")
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
# fmt: off
self.assertEqual(
inputs_image["input_ids"][0].tolist(),
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test as nested single list
inputs_image = processor(text=prompt_string, images=[[self.image_0]], return_tensors="pt")
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([1, 3, 32, 32]))
# fmt: off
self.assertEqual(
inputs_image["input_ids"][0].tolist(),
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 4701, 1307, 1278, 3937, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
def test_processor_with_multiple_images_single_list(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
prompt_string = "USER: [IMG][IMG]\nWhat's the difference between these two images? ASSISTANT:"
# Make small for checking image token expansion
processor.image_processor.size = {"longest_edge": 30}
processor.image_processor.patch_size = {"height": 2, "width": 2}
# Test passing in an image
inputs_image = processor(text=prompt_string, images=[self.image_0, self.image_1], return_tensors="pt")
self.assertIn("input_ids", inputs_image)
self.assertTrue(len(inputs_image["input_ids"]) == 1)
self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
# fmt: off
input_ids = inputs_image["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing in a url
inputs_url = processor(text=prompt_string, images=[self.url_0, self.url_1], return_tensors="pt")
self.assertIn("input_ids", inputs_url)
self.assertTrue(len(inputs_url["input_ids"]) == 1)
self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
# fmt: off
input_ids = inputs_url["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing in as a nested list
inputs_url = processor(text=prompt_string, images=[[self.image_0, self.image_1]], return_tensors="pt")
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([2, 3, 32, 32]))
# fmt: off
self.assertEqual(
inputs_url["input_ids"][0].tolist(),
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
def test_processor_with_multiple_images_multiple_lists(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
prompt_string = [
"USER: [IMG][IMG]\nWhat's the difference between these two images? ASSISTANT:",
"USER: [IMG]\nWhat's the content of the image? ASSISTANT:",
]
processor.tokenizer.pad_token = "</s>"
image_inputs = [[self.image_0, self.image_1], [self.image_2]]
# Make small for checking image token expansion
processor.image_processor.size = {"longest_edge": 30}
processor.image_processor.patch_size = {"height": 2, "width": 2}
# Test passing in an image
inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
self.assertIn("input_ids", inputs_image)
self.assertTrue(len(inputs_image["input_ids"]) == 2)
self.assertIsInstance(inputs_image["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
# fmt: off
input_ids = inputs_image["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing in a url
inputs_url = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
self.assertIn("input_ids", inputs_url)
self.assertTrue(len(inputs_url["input_ids"]) == 2)
self.assertIsInstance(inputs_url["input_ids"], torch.Tensor)
self.assertIsInstance(inputs_image["pixel_values"], torch.Tensor)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
# fmt: off
input_ids = inputs_url["input_ids"]
self.assertEqual(
input_ids[0].tolist(),
# Equivalent to ["USER: [IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END][IMG][IMG][IMG_BREAK][IMG][IMG][IMG_END]\nWhat's the difference between these two images? ASSISTANT:"]
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
# Test passing as a single flat list
inputs_image = processor(
text=prompt_string, images=[self.image_0, self.image_1, self.image_2], return_tensors="pt", padding=True
)
self.assertTrue(inputs_image["pixel_values"].shape == torch.Size([3, 3, 32, 32]))
# fmt: off
self.assertEqual(
inputs_image["input_ids"][0].tolist(),
[21510, 1058, 1032, 10, 10, 12, 10, 10, 13, 10, 10, 12, 10, 10, 13, 1010, 7493, 1681, 1278, 6592, 2396, 2576, 2295, 8061, 1063, 1349, 4290, 16002, 41150, 1058]
)
# fmt: on
def test_processor_returns_full_length_batches(self):
# to avoid https://github.com/huggingface/transformers/issues/34204
processor = self.processor_class.from_pretrained(self.tmpdirname)
prompt_string = [
"USER: [IMG]\nWhat's the content of the image? ASSISTANT:",
] * 5
processor.tokenizer.pad_token = "</s>"
image_inputs = [[self.image_0]] * 5
# Make small for checking image token expansion
processor.image_processor.size = {"longest_edge": 30}
processor.image_processor.patch_size = {"height": 2, "width": 2}
# Test passing in an image
inputs_image = processor(text=prompt_string, images=image_inputs, return_tensors="pt", padding=True)
self.assertIn("input_ids", inputs_image)
self.assertTrue(len(inputs_image["input_ids"]) == 5)
self.assertTrue(len(inputs_image["pixel_values"]) == 5)
| transformers/tests/models/pixtral/test_processing_pixtral.py/0 | {
"file_path": "transformers/tests/models/pixtral/test_processing_pixtral.py",
"repo_id": "transformers",
"token_count": 5838
} | 566 |
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
from transformers import ProphetNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
ProphetNetDecoder,
ProphetNetEncoder,
ProphetNetForCausalLM,
ProphetNetForConditionalGeneration,
ProphetNetModel,
ProphetNetTokenizer,
)
from transformers.modeling_outputs import BaseModelOutput
class ProphetNetModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=9,
# For common tests
is_training=True,
use_attention_mask=True,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=2,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=2,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
ngram=2,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 7
self.num_hidden_states_types = 3 # encoder, decoder_main, decoder_ngram
self.decoder_attention_idx = 2
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
decoder_attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = self.get_config()
return (
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def get_config(self):
return ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
ngram=self.ngram,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
return (
config,
decoder_input_ids,
decoder_attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
# make sure that lm_labels are correctly padded from the right
lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id)
# add causal pad token mask
triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not()
lm_labels.masked_fill_(triangular_mask, self.pad_token_id)
decoder_input_ids = model._shift_right(lm_labels)
for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)):
# first item
self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id)
if i < decoder_input_ids_slice.shape[-1]:
if i < decoder_input_ids.shape[-1] - 1:
# items before diagonal
self.parent.assertListEqual(
decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist()
)
# pad items after diagonal
if i < decoder_input_ids.shape[-1] - 2:
self.parent.assertListEqual(
decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist()
)
else:
# all items after square
self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist())
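# Hedged illustration (restating the convention verified above, assuming the standard
# seq2seq label shift implemented by the model's own `_shift_right` helper):
#   lm_labels:         [y1, y2, y3, ..., yN]
#   decoder_input_ids: [decoder_start_token_id, y1, y2, ..., y(N-1)]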
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past), config.num_decoder_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]), 4) # cross-attention + uni-directional self-attention
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 5)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_causal_lm_decoder(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForCausalLM(config=config).to(torch_device).eval()
outputs = model(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 4)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_generate_with_past_key_value_states(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_decoder_generate_with_past_key_value_states(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetForCausalLM(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=10, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=10, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = ProphetNetModel(config=config).to(torch_device).half().eval()
output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_encoder_decoder_shared_weights(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
for model_class in [ProphetNetModel, ProphetNetForConditionalGeneration]:
torch.manual_seed(0)
model = model_class(config=config).to(torch_device).eval()
# loading a state dict copies weights but does not tie them
if model_class == ProphetNetForConditionalGeneration:
model.prophetnet.encoder.load_state_dict(model.prophetnet.decoder.state_dict(), strict=False)
else:
model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)
torch.manual_seed(0)
tied_config = copy.deepcopy(config)
tied_config.tie_encoder_decoder = True
tied_model = model_class(config=tied_config).to(torch_device).eval()
model_result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that the tied model has fewer parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
tied_model.save_pretrained(tmpdirname)
tied_model = model_class.from_pretrained(tmpdirname)
tied_model.to(torch_device)
tied_model.eval()
# check that the tied model has fewer parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx],
tied_model_result[0][0, :, random_slice_idx],
atol=1e-4,
)
)
def check_fast_integration(
self,
config,
*args,
):
input_ids = torch.tensor([[7, 4, 78, 0, 24, 52, 43]], device=torch_device, dtype=torch.long)
decoder_input_ids = torch.tensor([[12, 62, 25, 11, 47, 15, 14]], device=torch_device, dtype=torch.long)
attention_mask = torch.tensor([[1, 1, 1, 0, 1, 0, 0]], device=torch_device, dtype=torch.long)
decoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 1, 0]], device=torch_device, dtype=torch.long)
lm_labels = torch.tensor([[62, 25, 11, 47, 15, 14, 24]], device=torch_device, dtype=torch.long)
torch.manual_seed(0)
config.ngram = 4
model = ProphetNetForConditionalGeneration(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertTrue(torch.allclose(result.loss, torch.tensor(4.5892, device=torch_device), atol=1e-3))
expected_logit_slice = torch.tensor(
[-0.0184, 0.0758, -0.0543, -0.0093, 0.0050, -0.0660, -0.1453], device=torch_device
)
self.parent.assertTrue(torch.allclose(result.logits[0, :, 1], expected_logit_slice, atol=1e-3))
def check_model_with_attn_mask(self, config, input_ids, decoder_input_ids, *args):
model = ProphetNetModel(config=config)
model.to(torch_device)
model.eval()
outputs_no_mask = model(input_ids=input_ids[:, :5], decoder_input_ids=decoder_input_ids[:, :5])
attention_mask = torch.ones_like(input_ids)
decoder_attention_mask = torch.ones_like(decoder_input_ids)
attention_mask[:, 5:] = 0
outputs_with_mask = model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
# check encoder
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.encoder_last_hidden_state[0, :, 0],
outputs_with_mask.encoder_last_hidden_state[0, :5, 0],
atol=1e-3,
)
)
# check decoder
# main stream
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.last_hidden_state[0, :, 0], outputs_with_mask.last_hidden_state[0, :5, 0], atol=1e-3
)
)
# predict stream
self.parent.assertTrue(
torch.allclose(
outputs_no_mask.last_hidden_state_ngram[0, :5, 0],
outputs_with_mask.last_hidden_state_ngram[0, :5, 0],
atol=1e-2,
)
)
def check_causal_lm_from_pretrained(
self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, *args
):
model = ProphetNetForConditionalGeneration(config).to(torch_device).eval()
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
decoder = ProphetNetForCausalLM.from_pretrained(tmp_dirname).to(torch_device)
encoder_hidden_states = model.prophetnet.encoder(input_ids).last_hidden_state
model_outputs = model(
encoder_outputs=BaseModelOutput(last_hidden_state=encoder_hidden_states),
decoder_input_ids=decoder_input_ids,
)
dec_outputs = decoder(encoder_hidden_states=encoder_hidden_states, input_ids=decoder_input_ids)
self.parent.assertTrue(
torch.allclose(
model_outputs.logits[0, :5],
dec_outputs.logits[0, :5],
atol=1e-3,
)
)
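# Hedged note (not part of the original tester): this round-trip relies on
# ProphetNetForCausalLM being loadable from a full encoder-decoder checkpoint saved
# with `save_pretrained` -- it picks up only the decoder weights, so its logits should
# match the seq2seq model's when both are fed the same encoder hidden states, which is
# exactly what the allclose check above verifies.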
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"use_cache": False,
}
return config, inputs_dict
class ProphetNetStandaloneDecoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=7,
# For common tests
is_training=True,
is_decoder=True,
use_attention_mask=True,
add_cross_attention=False,
use_cache=False,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=2,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=2,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
ngram=2,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.use_cache = use_cache
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.add_cross_attention = add_cross_attention
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.num_hidden_states_types = 2 # decoder_main, decoder_ngram
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
ngram=self.ngram,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
add_cross_attention=self.add_cross_attention,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append the new token to input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
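# Hedged mini-sketch (not part of the original tester) of the incremental decoding
# pattern exercised above -- run the full prefix once, then feed only the newest token
# together with the cached keys/values:
#   out = model(prefix_ids, use_cache=True)
#   step = model(next_token, past_key_values=out["past_key_values"])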
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values, use_cache=True)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
class ProphetNetStandaloneEncoderModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=7,
# For common tests
is_training=True,
is_decoder=False,
use_attention_mask=True,
add_cross_attention=False,
use_cache=False,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=2,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=2,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.use_cache = use_cache
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.add_cross_attention = add_cross_attention
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 1
self.num_hidden_states_types = 1
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
add_cross_attention=self.add_cross_attention,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class ProphetNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": ProphetNetModel,
"summarization": ProphetNetForConditionalGeneration,
"text-generation": ProphetNetForCausalLM,
"text2text-generation": ProphetNetForConditionalGeneration,
"translation": ProphetNetForConditionalGeneration,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
is_encoder_decoder = True
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `ProphetNetConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def setUp(self):
self.model_tester = ProphetNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_lm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_only_decoder_causal_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs)
def test_fast_integration(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_fast_integration(*config_and_inputs)
def test_shared_weights(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)
def test_shift_labels_via_shift_left(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
@unittest.skip(reason="Flaky test with no simple resolution. TODO Fix me @patrickvonplaten")
def test_decoder_model_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs)
def test_encoder_decoder_model_generate(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_generate_with_past_key_value_states(*config_and_inputs)
def test_attn_mask_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_model_with_attn_mask(*config_and_inputs)
def test_config_save(self):
config = self.model_tester.prepare_config_and_inputs()[0]
config.add_cross_attention = False
with tempfile.TemporaryDirectory() as tmp_dirname:
config.save_pretrained(tmp_dirname)
config = ProphetNetConfig.from_pretrained(tmp_dirname)
self.assertFalse(config.add_cross_attention)
def test_causal_lm_from_pretrained(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_causal_lm_from_pretrained(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
# methods overwrite method in `test_modeling_common.py`
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 7
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
(self.model_tester.ngram + 1) * decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
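# Hedged note (not part of the original test): ProphetNet's decoder runs a main stream
# plus `ngram` predict streams, so cross-attention queries span
# (ngram + 1) * decoder_seq_length positions. With the tester defaults (ngram=2,
# decoder_seq_length=9) that is (2 + 1) * 9 = 27 query positions, matching the
# cross-attention shape asserted above.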
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
@require_torch
class ProphetNetStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetDecoder, ProphetNetForCausalLM) if is_torch_available() else ()
test_pruning = False
test_resize_embeddings = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = ProphetNetStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@require_torch
class ProphetNetStandaloneEncoderModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (ProphetNetEncoder,) if is_torch_available() else ()
test_pruning = False
test_resize_embeddings = False
is_encoder_decoder = False
def setUp(self):
self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=ProphetNetConfig)
def test_config(self):
self.config_tester.run_common_tests()
@require_torch
class ProphetNetModelIntegrationTest(unittest.TestCase):
@slow
def test_pretrained_checkpoint_hidden_states(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
model.to(torch_device)
# encoder-decoder outputs
encoder_ids = torch.tensor(
[
[
2871,
102,
2048,
3176,
2780,
1997,
2871,
26727,
2169,
2097,
12673,
1996,
8457,
2006,
2049,
8240,
2859,
2799,
1012,
2023,
6512,
2038,
2174,
13977,
2195,
25962,
1012,
102,
]
]
).to(torch_device)
decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to(
torch_device
)
output = model(
input_ids=encoder_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=decoder_prev_ids,
)
output_predicted_logits = output[0]
expected_shape = torch.Size((1, 12, 30522))
self.assertEqual(output_predicted_logits.shape, expected_shape)
expected_slice = torch.tensor(
[[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]]
).to(torch_device)
# torch.testing.assert_close(output_predicted_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(output_predicted_logits[:, :3, :3], expected_slice, atol=1e-4)
# encoder outputs
encoder_outputs = model.prophetnet.encoder(encoder_ids)[0]
expected_encoder_outputs_slice = torch.tensor(
[[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, -0.4623], [-0.4585, 0.0165, -0.6652]]]
).to(torch_device)
expected_shape_encoder = torch.Size((1, 28, 1024))
self.assertEqual(encoder_outputs.shape, expected_shape_encoder)
# torch.testing.assert_close(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)
# decoder outputs
decoder_outputs = model.prophetnet.decoder(decoder_prev_ids, encoder_hidden_states=encoder_outputs)
predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1)
predicting_streams_logits = model.lm_head(predicting_streams)
next_first_stream_logits = predicting_streams_logits[:, 0]
# torch.testing.assert_close(next_first_stream_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)
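# Hedged reading (inferred from the checks above, not from the model docs): the
# conditional-generation logits coincide with `lm_head` applied to the first predict
# stream, which is why the same `expected_slice` matches both assertions.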
@slow
def test_cnndm_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
model.config.max_length = 512
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
ARTICLE_TO_SUMMARIZE = (
"USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of"
" CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a"
" high-level science and technology workforce, as deemed critical for development of China's economy,"
' defense, and science and technology education. The establishment was hailed as "A Major Event in the'
' History of Chinese Education and Science." CAS has supported USTC by combining most of its institutes'
" with the departments of the university. USTC is listed in the top 16 national key universities, becoming"
" the youngest national key university.".lower()
)
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_512 = (
"us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the"
" top 16 national key universities ."
)
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_512],
generated_titles,
)
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
# slightly fewer article tokens are actually used, since max_length=99 also counts the bos and eos tokens.
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_100 = (
r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc "
"'"
" s founding mission was to develop a high - level science and technology workforce . [X_SEP]"
' establishment hailed as " a major event in the history of chinese education and science "'
)
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_100],
generated_titles,
)
@slow
def test_question_gen_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
INPUTS = [
"Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
]
input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
gen_output = model.generate(input_ids, num_beams=5, early_stopping=True)
generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True)
EXPECTED_QUESTIONS = [
"along with paul allen, who founded microsoft?",
"what year was microsoft founded?",
"when was microsoft founded?",
]
self.assertListEqual(
EXPECTED_QUESTIONS,
generated_questions,
)
| transformers/tests/models/prophetnet/test_modeling_prophetnet.py/0 | {
"file_path": "transformers/tests/models/prophetnet/test_modeling_prophetnet.py",
"repo_id": "transformers",
"token_count": 25603
} | 567 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_dummy_dataset(self):
dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
}
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
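    # Illustrative sketch (not part of the original tests) of why the retrieve tests
    # below expect doc_ids [[1], [0]]: with METRIC_INNER_PRODUCT, a query of all ones
    # scores 2 * retrieval_vector_size against doc "1" (embedding 2 * ones) but only
    # retrieval_vector_size against doc "0" (embedding ones); a query of all -ones
    # flips the ranking. For example (assuming the `datasets` search API):
    #
    #     scores, ids = dataset.search("embeddings", np.ones(8, dtype=np.float32), k=1)
    #     # ids[0] == 1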
def get_dummy_canonical_hf_index_retriever(self):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
)
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
return retriever
def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
dataset = self.get_dummy_dataset()
config = RagConfig(
retrieval_vector_size=self.retrieval_vector_size,
question_encoder=DPRConfig().to_dict(),
generator=BartConfig().to_dict(),
index_name="custom",
)
if from_disk:
config.passages_path = os.path.join(self.tmpdirname, "dataset")
config.index_path = os.path.join(self.tmpdirname, "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
del dataset
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
)
else:
retriever = RagRetriever(
config,
question_encoder_tokenizer=self.get_dpr_tokenizer(),
generator_tokenizer=self.get_bart_tokenizer(),
index=CustomHFIndex(config.retrieval_vector_size, dataset),
)
return retriever
def test_canonical_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
mock_load_dataset.return_value = self.get_dummy_dataset()
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_custom_hf_index_retriever_retrieve(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
def test_custom_hf_index_retriever_retrieve_from_disk(self):
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(doc_dicts), 2)
self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]])
def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(tmp_dirname)
retriever = RagRetriever.from_pretrained(tmp_dirname)
self.assertIsInstance(retriever, RagRetriever)
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever.retrieve(hidden_states, n_docs=1)
self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_hf_index_retriever_call(self):
import torch
n_docs = 1
retriever = self.get_dummy_canonical_hf_index_retriever()
question_input_ids = [[5, 7], [10, 11]]
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
context_input_ids, context_attention_mask, retrieved_doc_embeds = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, list)
self.assertIsInstance(context_attention_mask, list)
self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
out = retriever(
question_input_ids,
hidden_states,
prefix=retriever.config.generator.prefix,
n_docs=n_docs,
return_tensors="pt",
)
context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(context_input_ids, torch.Tensor)
self.assertIsInstance(context_attention_mask, torch.Tensor)
self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_custom_hf_index_end2end_retriever_call(self):
context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
n_docs = 1
retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
question_input_ids = [[5, 7], [10, 11]]
hidden_states = np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
)
out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
self.assertEqual(
len(out), 6
        )  # check that the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for the doc-token-related keys in the output dictionary.
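        # For reference, the six output attributes checked above are context_input_ids,
        # context_attention_mask, retrieved_doc_embeds, doc_ids, plus the two
        # context-encoder keys asserted explicitly: tokenized_doc_ids and
        # tokenized_doc_attention_mask.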
| transformers/tests/models/rag/test_retrieval_rag.py/0 | {
"file_path": "transformers/tests/models/rag/test_retrieval_rag.py",
"repo_id": "transformers",
"token_count": 6761
} | 568 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.util import safe_repr
from transformers import AutoTokenizer, RwkvConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
RwkvForCausalLM,
RwkvModel,
)
class RwkvModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return RwkvConfig.from_pretrained("sgugger/rwkv-4-pile-7b")
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config(
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
return (
config,
input_ids,
input_mask,
None,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
return RwkvConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
activation_function=self.hidden_act,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def create_and_check_rwkv_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
config.output_hidden_states = True
model = RwkvModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1)
    def create_and_check_causal_lm(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = RwkvForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_state_equivalency(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = RwkvModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
output_whole = outputs.last_hidden_state
outputs = model(input_ids[:, :2])
output_one = outputs.last_hidden_state
# Using the state computed on the first inputs, we will get the same output
outputs = model(input_ids[:, 2:], state=outputs.state)
output_two = outputs.last_hidden_state
self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5))
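    # Minimal usage sketch (illustrative): because RWKV is recurrent, a long input can
    # be processed in chunks by threading the returned state through successive calls:
    #
    #     out = model(input_ids[:, :2])
    #     out = model(input_ids[:, 2:], state=out.state)  # equivalent to one full pass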
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class RwkvModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (RwkvModel, RwkvForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": RwkvModel, "text-generation": RwkvForCausalLM} if is_torch_available() else {}
)
fx_compatible = False
test_missing_keys = False
test_model_parallel = False
test_pruning = False
test_head_masking = False # Rwkv does not support head masking
def setUp(self):
self.model_tester = RwkvModelTester(self)
self.config_tester = ConfigTester(
self, config_class=RwkvConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, (list, tuple)):
max_value, min_value = max(member), min(member)
        if not isinstance(container, (list, tuple)):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
def test_config(self):
self.config_tester.run_common_tests()
def test_rwkv_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_rwkv_model(*config_and_inputs)
def test_rwkv_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm(*config_and_inputs)
def test_state_equivalency(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_state_equivalency(*config_and_inputs)
def test_initialization(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
for name, param in model.named_parameters():
if "time_decay" in name:
if param.requires_grad:
self.assertTrue(param.data.max().item() == 3.0)
self.assertTrue(param.data.min().item() == -5.0)
elif "time_first" in name:
if param.requires_grad:
                        # check that it is initialized like torch.ones
torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5)
elif any(x in name for x in ["time_mix_key", "time_mix_receptance"]):
if param.requires_grad:
self.assertInterval(
param.data,
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
elif "time_mix_value" in name:
if param.requires_grad:
self.assertInterval(
param.data,
[0.0, 1.3],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def test_attention_outputs(self):
r"""
        Overriding test_attention_outputs because the attention outputs of RWKV differ from other models:
        they have shape `(batch_size, seq_len, hidden_size)`.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
@slow
def test_model_from_pretrained(self):
model_name = "RWKV/rwkv-4-169m-pile"
model = RwkvModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_beam_sample_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_beam_sample_generate_dict_output()
self.has_attentions = old_has_attentions
def test_beam_search_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_beam_search_generate_dict_output()
self.has_attentions = old_has_attentions
def test_constrained_beam_search_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_constrained_beam_search_generate_dict_output()
self.has_attentions = old_has_attentions
def test_greedy_generate_dict_outputs(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_greedy_generate_dict_outputs()
self.has_attentions = old_has_attentions
def test_group_beam_search_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_group_beam_search_generate_dict_output()
self.has_attentions = old_has_attentions
def test_sample_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_sample_generate_dict_output()
self.has_attentions = old_has_attentions
@unittest.skip("This model doesn't support padding")
def test_left_padding_compatibility(self):
pass
@slow
class RWKVIntegrationTests(unittest.TestCase):
def setUp(self):
self.model_id = "RWKV/rwkv-4-169m-pile"
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
def test_simple_generate(self):
expected_output = "Hello my name is Jasmine and I am a newbie to the"
model = RwkvForCausalLM.from_pretrained(self.model_id).to(torch_device)
input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device)
output = model.generate(input_ids, max_new_tokens=10)
output_sentence = self.tokenizer.decode(output[0].tolist())
self.assertEqual(output_sentence, expected_output)
def test_simple_generate_bf16(self):
expected_output = "Hello my name is Jasmine and I am a newbie to the"
input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device)
model = RwkvForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16).to(torch_device)
output = model.generate(input_ids, max_new_tokens=10)
output_sentence = self.tokenizer.decode(output[0].tolist())
self.assertEqual(output_sentence, expected_output)
| transformers/tests/models/rwkv/test_modeling_rwkv.py/0 | {
"file_path": "transformers/tests/models/rwkv/test_modeling_rwkv.py",
"repo_id": "transformers",
"token_count": 8154
} | 569 |
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import SeamlessM4TFeatureExtractor, is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
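# Example (illustrative): floats_list((2, 3)) returns a 2x3 nested list such as
# [[0.84, 0.42, 0.13], [0.97, 0.55, 0.08]], ready for np.asarray or torch.tensor.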
@require_torch
class SeamlessM4TFeatureExtractionTester:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=10,
padding_value=0.0,
sampling_rate=4_000,
return_attention_mask=True,
do_normalize=True,
stride=2,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
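        # Worked example with the defaults above: (2000 - 400) // (7 - 1) = 266, so
        # prepare_inputs_for_common produces lengths 400, 666, 932, ..., 1996,
        # one strictly increasing length per batch item.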
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
self.feature_size = feature_size
self.stride = stride
self.num_mel_bins = feature_size
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"stride": self.stride,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTester.prepare_inputs_for_common
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
class SeamlessM4TFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = SeamlessM4TFeatureExtractor if is_speech_available() else None
def setUp(self):
self.feat_extract_tester = SeamlessM4TFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
self.assertDictEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
self.assertEqual(dict_first, dict_second)
def test_call_numpy(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[0] == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size * feature_extractor.stride)
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def test_call_with_padded_input_not_multiple_of_stride(self):
        # same as test_call_numpy but with stride=6 and pad_to_multiple_of=8
        # the input sizes 800, 1000 and 1200 are multiples of pad_to_multiple_of but not necessarily of stride
        # therefore remainder = num_frames % self.stride may be non-zero and must be subtracted from num_frames
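        # Worked example (illustrative): if an input yields num_frames = 100 mel frames,
        # then 100 % 6 = 4 trailing frames are dropped, and the remaining 96 frames are
        # regrouped into 96 / 6 = 16 vectors of size feature_size * stride.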
stride = 6
pad_to_multiple_of = 8
feature_extractor_args = self.feat_extract_tester.prepare_feat_extract_dict()
feature_extractor_args["stride"] = stride
feature_extractor = self.feature_extraction_class(**feature_extractor_args)
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test feature size and attention mask size
output = feature_extractor(np_speech_inputs, pad_to_multiple_of=pad_to_multiple_of, return_tensors="np")
input_features = output.input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[0] == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size * feature_extractor.stride)
# same as test_attention_mask
attention_mask = output.attention_mask
self.assertTrue(attention_mask.ndim == 2)
self.assertTrue(attention_mask.shape[0] == 3)
self.assertTrue(attention_mask.shape[-1] == input_features.shape[1])
# Test not batched input
encoded_sequences_1 = feature_extractor(
speech_inputs[0], pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
encoded_sequences_2 = feature_extractor(
np_speech_inputs[0], pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(
speech_inputs, pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
encoded_sequences_2 = feature_extractor(
np_speech_inputs, pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = feature_extractor(
speech_inputs, pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
encoded_sequences_2 = feature_extractor(
np_speech_inputs, pad_to_multiple_of=pad_to_multiple_of, return_tensors="np"
).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def test_call_without_attention_mask(self):
feature_extractor_args = self.feat_extract_tester.prepare_feat_extract_dict()
feature_extractor = self.feature_extraction_class(**feature_extractor_args)
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test attention mask when passing no attention mask to forward call
output = feature_extractor(np_speech_inputs, padding=True, return_tensors="np", return_attention_mask=False)
self.assertTrue("attention_mask" not in output)
        # Test that no attention mask is returned when the extractor is configured with return_attention_mask=False
feature_extractor_args["return_attention_mask"] = False
feature_extractor = self.feature_extraction_class(**feature_extractor_args)
output = feature_extractor(np_speech_inputs, padding=True, return_tensors="np", return_attention_mask=False)
self.assertTrue("attention_mask" not in output)
def test_attention_mask(self):
# test attention mask has the right output shape
feature_extractor_args = self.feat_extract_tester.prepare_feat_extract_dict()
feature_extractor = self.feature_extraction_class(**feature_extractor_args)
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test attention mask when passing it to forward call
output = feature_extractor(np_speech_inputs, padding=True, return_tensors="np")
input_features = output.input_features
attention_mask = output.attention_mask
self.assertTrue(attention_mask.ndim == 2)
self.assertTrue(attention_mask.shape[0] == 3)
self.assertTrue(attention_mask.shape[-1] == input_features.shape[1])
@require_torch
def test_call_torch(self):
import torch
        # Tests that all calls wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
pt_speech_inputs = [torch.tensor(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(pt_speech_inputs, padding=True, return_tensors="pt").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[0] == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size * feature_extractor.stride)
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs[0], return_tensors="pt").input_features
torch.testing.assert_close(encoded_sequences_1, encoded_sequences_2, rtol=1e-3, atol=1e-3)
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs, return_tensors="pt").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
torch.testing.assert_close(enc_seq_1, enc_seq_2, rtol=1e-3, atol=1e-3)
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
pt_speech_inputs = torch.tensor(speech_inputs)
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="pt").input_features
encoded_sequences_2 = feature_extractor(pt_speech_inputs, return_tensors="pt").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
torch.testing.assert_close(enc_seq_1, enc_seq_2, rtol=1e-3, atol=1e-3)
@require_torch
# Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _load_datasample(self, id):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_sample = ds.sort("id")[id]["audio"]["array"]
return torch.from_numpy(speech_sample).unsqueeze(0)
def test_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
-1.5621, -1.4236, -1.3335, -1.3991, -1.2881, -1.1133, -0.9710, -0.8895,
-0.8280, -0.7376, -0.7194, -0.6896, -0.6849, -0.6788, -0.6545, -0.6610,
-0.6566, -0.5738, -0.5252, -0.5533, -0.5887, -0.6116, -0.5971, -0.4956,
-0.2881, -0.1512, 0.0299, 0.1762, 0.2728, 0.2236
]
)
# fmt: on
input_speech = self._load_datasample(10)
feature_extractor = SeamlessM4TFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
self.assertEqual(input_features.shape, (1, 279, 160))
torch.testing.assert_close(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
audio = self._load_datasample(1)
audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
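        # Illustrative arithmetic (not part of the original test): the normalization is
        # roughly (x - x.mean()) / sqrt(x.var() + eps), so even inputs rescaled to the
        # order of 1e4 should come out with mean ~ 0 and variance ~ 1.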
audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue((audio.mean().abs() < 1e-3).all())
self.assertTrue(((audio.var() - 1).abs() < 1e-3).all())
| transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py/0 | {
"file_path": "transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py",
"repo_id": "transformers",
"token_count": 7167
} | 570 |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SmolVLM model."""
import copy
import unittest
from io import BytesIO
import pytest
import requests
from parameterized import parameterized
from transformers import (
AutoProcessor,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
is_flaky,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
GenerationConfig,
SmolVLMConfig,
SmolVLMForConditionalGeneration,
SmolVLMModel,
)
if is_vision_available():
from PIL import Image
class SmolVLMVisionText2TextModelTester:
def __init__(
self,
parent,
is_training=True,
batch_size=2,
scale_factor=2,
num_images=2,
vision_config={
"image_size": 16,
"patch_size": 4,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 32,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
text_config={
"vocab_size": 100,
"hidden_size": 64,
"intermediate_size": 56,
"num_hidden_layers": 3,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 256,
"initializer_range": 0.02,
"rms_norm_eps": 1e-6,
"pad_token_id": 2,
"bos_token_id": 0,
"eos_token_id": 1,
"image_token_id": 57,
"tie_word_embeddings": False,
"rope_theta": 10000.0,
"sliding_window": 32,
"attention_dropout": 0.0,
},
use_cache=False,
tie_word_embeddings=False,
image_token_id=57,
):
self.parent = parent
self.is_training = is_training
self.batch_size = batch_size
self.num_images = num_images
self.scale_factor = scale_factor
self.seq_length = (
int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (self.scale_factor**2))
* self.num_images
)
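        # Worked example with the defaults above: (16 // 4) ** 2 = 16 patches per image,
        # reduced by scale_factor ** 2 = 4 (the connector's pixel-shuffle downsampling)
        # to 4 tokens per image, times num_images = 2 gives seq_length = 8 image tokens.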
self.use_cache = use_cache
self.image_token_id = image_token_id
self.tie_word_embeddings = tie_word_embeddings
        # Hack: add properties here so we can reuse the common tests
self.vocab_size = text_config["vocab_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.hidden_size = text_config["hidden_size"]
self.vision_config = vision_config
self.text_config = text_config
def get_config(self):
return SmolVLMConfig(
use_cache=self.use_cache,
image_token_id=self.image_token_id,
tie_word_embeddings=self.tie_word_embeddings,
vision_config=self.vision_config,
text_config=self.text_config,
vocab_size=self.vocab_size,
scale_factor=self.scale_factor,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.num_images,
3, # SmolVLMImageProcessor always generates RGB pixel values
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
# For simplicity just set the last n tokens to the image token
n_image_tokens_per_batch = self.seq_length
input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
attention_mask = input_ids.ne(1).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class SmolVLMModelTest(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `SmolVLM`.
"""
all_model_classes = (SmolVLMModel,) if is_torch_available() else ()
fx_compatible = False
test_torchscript = False
test_pruning = False
test_resize_embeddings = True
test_head_masking = False
def setUp(self):
self.model_tester = SmolVLMVisionText2TextModelTester(self)
self.config_tester = ConfigTester(
self, config_class=SmolVLMConfig, has_text_modality=False, common_properties=["image_token_id"]
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
def test_sdpa_can_dispatch_on_flash(self):
pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Ignore copy
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# make sure that decoder_input_ids are resized as well
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
        self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
        model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
        self.assertEqual(model_embed.weight.shape[0] % 64, 0)
        self.assertEqual(model_embed.weight.shape[0], model.config.text_config.vocab_size)
        self.assertEqual(model.config.text_config.vocab_size, model.vocab_size)
        model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
        self.assertEqual(model_embed.weight.shape[0] % 64, 0)
        # Check that resizing a model to a multiple of pad_to_multiple_of leads to a model of exactly that size
        target_dimension = 128
        model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
        self.assertEqual(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
# We need to override as we need to prepare such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@require_torch
class SmolVLMForConditionalGenerationModelTest(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase):
"""
Model tester for `SmolVLMForConditionalGeneration`.
"""
all_model_classes = (SmolVLMForConditionalGeneration,) if is_torch_available() else ()
all_generative_model_classes = (SmolVLMForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"image-text-to-text": SmolVLMForConditionalGeneration} if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = True
test_head_masking = False
test_torchscript = False
def setUp(self):
self.model_tester = SmolVLMVisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=SmolVLMConfig, has_text_modality=False)
@unittest.skip(reason="Model does not support padding right")
def test_flash_attn_2_inference_padding_right(self):
pass
@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate(self):
pass
@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn")
def test_contrastive_generate_low_memory(self):
pass
@unittest.skip(
reason="Prompt lookup decoding needs a way to indicate `bad_word_ids` that should not be suggested as candidates"
)
def test_prompt_lookup_decoding_matches_greedy_search(self):
pass
@pytest.mark.generate
@is_flaky(description="TODO: check why flaky")
def test_generate_methods_with_logits_to_keep(self):
super().test_generate_methods_with_logits_to_keep()
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Unsupported")
def test_generate_with_static_cache(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="Compile not yet supported in SmolVLM models")
def test_sdpa_can_dispatch_on_flash(self):
pass
@pytest.mark.generate
@slow
@unittest.skip(
reason="SmolVLM doesn't support SDPA for all backbones, vision backbones has only eager/FA2 attention"
)
def test_eager_matches_sdpa_generate(self):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip(reason="Cache position is off by one leaving out image tokens, FIXME raushan")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
# We need to override as we need to prepare such that the image token is the last token
def test_resize_tokens_embeddings(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
            # Retrieve the embeddings and clone them
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
            self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
            model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
            self.assertEqual(model_embed.weight.shape[0] % 64, 0)
            self.assertEqual(model_embed.weight.shape[0], model.config.text_config.vocab_size)
            self.assertEqual(model.config.text_config.vocab_size, model.vocab_size)
            model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
            self.assertEqual(model_embed.weight.shape[0] % 64, 0)
# Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
target_dimension = 128
model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
            self.assertEqual(model_embed.weight.shape[0], target_dimension)
with self.assertRaisesRegex(
ValueError,
"Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
):
model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)
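    # Added illustration (not part of the original suite): a minimal sketch of
    # the `pad_to_multiple_of` arithmetic exercised above. This assumes the
    # library rounds the requested vocabulary size up to the next multiple,
    # which is what the `% 64 == 0` checks rely on.
    @staticmethod
    def _expected_padded_vocab_size(new_num_tokens, pad_to_multiple_of):
        # e.g. _expected_padded_vocab_size(99 + 13, 64) == 128
        return ((new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of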
    # We need to override this test to prepare inputs such that the image token is the last token
def test_resize_embeddings_untied(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
original_config.tie_word_embeddings = False
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2)
n_images = self.model_tester.num_images * self.model_tester.seq_length
model.model.image_token_id = model_vocab_size - 15 - 1
inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
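    # Added illustration (not used by the tests above): the input-preparation
    # trick both overrides rely on -- clamp token ids below the shrunken vocab,
    # then place the image token id in the trailing positions.
    @staticmethod
    def _clamp_and_place_image_tokens(input_ids, max_id, image_token_id, n_images):
        input_ids = input_ids.clamp(max=max_id)
        input_ids[:, -n_images:] = image_token_id
        return input_ids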
@require_torch
class SmolVLMForConditionalGenerationIntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-256M-Video-Instruct")
self.image1 = Image.open(
BytesIO(
requests.get(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
).content
)
)
self.video_messages = [
{
"role": "user",
"content": [
{
"type": "video",
"path": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov",
},
{"type": "text", "text": "Describe this video in detail"},
],
},
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_integration_test(self):
model = SmolVLMForConditionalGeneration.from_pretrained(
"HuggingFaceTB/SmolVLM2-256M-Video-Instruct",
dtype=torch.bfloat16,
device_map="auto",
)
# Create inputs
text = "<image>In this image, we see"
images = self.image1
inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True)
inputs.to(device=torch_device, dtype=torch.bfloat16)
generated_ids = model.generate(**inputs, max_new_tokens=9)
generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
expected_generated_text = "\n\n\n\nIn this image, we see a view of the Statue of Liberty and the"
self.assertEqual(generated_texts[0], expected_generated_text)
@slow
def test_integration_test_video(self):
model = SmolVLMForConditionalGeneration.from_pretrained(
"HuggingFaceTB/SmolVLM2-256M-Video-Instruct",
dtype=torch.bfloat16,
device_map="auto",
)
# Create inputs
inputs = self.processor.apply_chat_template(
self.video_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(device=torch_device, dtype=torch.bfloat16)
generated_ids = model.generate(**inputs, max_new_tokens=20)
generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
expected_generated_strings = Expectations(
{
(None, None): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video depicts a large language model architecture, specifically a language model with a "quick brown" feature', # fmt: skip
("cuda", (8, 0)): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model architecture, specifically a "Quick Brown" model, which is designed', # fmt: skip
("cuda", (8, 6)): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model, specifically a neural network model, which is designed to learn and', # fmt: skip
("rocm", None): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model architecture, specifically a "Quick Brown" model, which is designed', # fmt: skip
}
) # fmt: skip
expected_generated_text = expected_generated_strings.get_expectation()
print(f"Generated text: {generated_texts[0]}")
self.assertEqual(generated_texts[0], expected_generated_text)
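    # Added illustration: a simplified mimic of how a device-keyed expectation
    # table like the one above can be resolved -- an exact (device, capability)
    # entry wins over the (None, None) fallback. The real `Expectations` helper
    # in transformers.testing_utils may differ in detail.
    @staticmethod
    def _resolve_expectation(table, device=None, capability=None):
        return table.get((device, capability), table.get((None, None)))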
@slow
def test_export_smolvlm_vision_encoder(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
        # For ExecuTorch, flash attention is not supported, so the vision encoder must be exported in a way that is compatible with the text decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
# Load model and extract vision encoder
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_vision_encoder()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
@slow
def test_export_smolvlm_connector(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
        # For ExecuTorch, flash attention is not supported, so the vision encoder must be exported in a way that is compatible with the text decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
# Load the model and extract the connector (multi-modal projector)
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
connector = model.model.connector
connector.eval()
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_connector()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
@slow
def test_export_smolvlm_text_decoder(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
        # For ExecuTorch, flash attention is not supported, so the vision encoder must be exported in a way that is compatible with the text decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
config.text_config.use_cache = True
config.text_config.attn_implementation = "sdpa"
generation_config = GenerationConfig(
use_cache=True,
cache_implementation="static",
max_length=1234,
cache_config={
"batch_size": 1,
"max_cache_len": 1234,
},
)
# Load the model and extract the text decoder
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
model.model.text_model.generation_config = generation_config
text_decoder = model.model.text_model
text_decoder.eval()
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_text_decoder()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
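    # Added illustration: the export helpers above ultimately produce a
    # `torch.export.ExportedProgram`, the same artifact `torch.export.export`
    # returns when tracing a plain module. A toy sketch:
    @staticmethod
    def _export_toy_module():
        class Toy(torch.nn.Module):
            def forward(self, x):
                return x * 2
        return torch.export.export(Toy(), (torch.ones(1),))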
| transformers/tests/models/smolvlm/test_modeling_smolvlm.py/0 | {
"file_path": "transformers/tests/models/smolvlm/test_modeling_smolvlm.py",
"repo_id": "transformers",
"token_count": 13567
} | 571 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Splinter model."""
import copy
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel
class SplinterModelTester:
def __init__(
self,
parent,
batch_size=13,
num_questions=3,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
question_token_id=1,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_questions = num_questions
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.question_token_id = question_token_id
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, 1] = self.question_token_id
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
start_positions = None
end_positions = None
question_positions = None
if self.use_labels:
start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels)
config = SplinterConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
question_token_id=self.question_token_id,
)
return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_question_answering(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions[:, 0],
end_positions=end_positions[:, 0],
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_pretraining(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
SplinterModel,
SplinterForQuestionAnswering,
SplinterForPreTraining,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"feature-extraction": SplinterModel, "question-answering": SplinterForQuestionAnswering}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests when this model gets more usage
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "QAPipelineTests":
return True
elif pipeline_test_case_name == "FeatureExtractionPipelineTests" and tokenizer_name.endswith("Fast"):
return True
return False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if return_labels:
if issubclass(model_class, SplinterForPreTraining):
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size,
self.model_tester.num_questions,
dtype=torch.long,
device=torch_device,
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size,
self.model_tester.num_questions,
dtype=torch.long,
device=torch_device,
)
inputs_dict["question_positions"] = torch.zeros(
self.model_tester.batch_size,
self.model_tester.num_questions,
dtype=torch.long,
device=torch_device,
)
elif issubclass(model_class, SplinterForQuestionAnswering):
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = SplinterModelTester(self)
self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
if isinstance(model, SplinterForPreTraining):
with self.assertRaises(TypeError):
# question_positions must not be None.
model(**inputs)[0]
else:
model(**inputs)[0]
@slow
def test_model_from_pretrained(self):
model_name = "tau/splinter-base"
model = SplinterModel.from_pretrained(model_name)
self.assertIsNotNone(model)
    # overwrite from common since `SplinterForPreTraining` can contain a different number of question tokens in its inputs.
    # When the batch is distributed to multiple devices, each replica could get a different value for the maximal number
    # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns a different
    # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output.
@require_torch_multi_gpu
def test_multi_gpu_data_parallel_forward(self):
from torch import nn
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# some params shouldn't be scattered by nn.DataParallel
# so just remove them if they are present.
blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"]
for k in blacklist_non_batched_params:
inputs_dict.pop(k, None)
        # move input tensors to cuda:0
for k, v in inputs_dict.items():
if torch.is_tensor(v):
inputs_dict[k] = v.to(0)
for model_class in self.all_model_classes:
# Skip this case since it will fail sometimes, as described above.
if model_class == SplinterForPreTraining:
continue
model = model_class(config=config)
model.to(0)
model.eval()
# Wrap model in nn.DataParallel
model = nn.DataParallel(model)
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class))
@unittest.skip(
"Splinter GC with `use_reentrant` fails after #38751, FIXME raushan after deprecated args are removed"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
"Splinter GC with `use_reentrant` fails after #38751, FIXME raushan after deprecated args are removed"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@require_torch
class SplinterModelIntegrationTest(unittest.TestCase):
@slow
def test_splinter_question_answering(self):
model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]"
# Output should be the span "the United Kingdom"
input_ids = torch.tensor(
[[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
output = model(input_ids)
expected_shape = torch.Size((1, 16))
self.assertEqual(output.start_logits.shape, expected_shape)
self.assertEqual(output.end_logits.shape, expected_shape)
self.assertEqual(torch.argmax(output.start_logits), 10)
self.assertEqual(torch.argmax(output.end_logits), 12)
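    # Added illustration: decoding the predicted answer span from the logits
    # asserted above -- argmax over the sequence dimension yields the start
    # and end token indices (here 10 and 12, spanning "the United Kingdom").
    @staticmethod
    def _decode_span(start_logits, end_logits):
        return int(torch.argmax(start_logits)), int(torch.argmax(end_logits))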
@slow
def test_splinter_pretraining(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
question_positions = torch.tensor([[1, 5]], dtype=torch.long)
output = model(input_ids, question_positions=question_positions)
expected_shape = torch.Size((1, 2, 16))
self.assertEqual(output.start_logits.shape, expected_shape)
self.assertEqual(output.end_logits.shape, expected_shape)
self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7)
self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7)
self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10)
self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12)
@slow
def test_splinter_pretraining_loss_requires_question_positions(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]]
)
start_positions = torch.tensor([[7, 10]], dtype=torch.long)
        end_positions = torch.tensor([[7, 12]], dtype=torch.long)
with self.assertRaises(TypeError):
model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
)
@slow
def test_splinter_pretraining_loss(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
]
)
start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long)
end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long)
question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long)
output = model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
self.assertAlmostEqual(output.loss.item(), 0.0024, 4)
@slow
def test_splinter_pretraining_loss_with_padding(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
# Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]"
# Output should be the spans "Brad" and "the United Kingdom"
input_ids = torch.tensor(
[
[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102],
]
)
start_positions = torch.tensor([[7, 10]], dtype=torch.long)
        end_positions = torch.tensor([[7, 12]], dtype=torch.long)
question_positions = torch.tensor([[1, 5]], dtype=torch.long)
start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long)
        end_positions_with_padding = torch.tensor([[7, 12, 0]], dtype=torch.long)
question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long)
output = model(
input_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
output_with_padding = model(
input_ids,
start_positions=start_positions_with_padding,
end_positions=end_positions_with_padding,
question_positions=question_positions_with_padding,
)
self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4)
        # Note that the original code uses 0 to denote padded question tokens
        # and their start and end positions. As the pad_token_id of the model's
        # config is used as the loss's ignore_index in SplinterForPreTraining,
        # we add this test to ensure that anybody changing the default value
        # of the config is aware of the implication.
self.assertEqual(model.config.pad_token_id, 0)
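    # Added illustration: why `pad_token_id == 0` matters above. A sketch of a
    # cross-entropy loss whose ignore_index is the pad token id, so padded
    # question positions contribute nothing to the loss.
    @staticmethod
    def _loss_ignoring_padding(logits, targets, pad_token_id=0):
        loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
        return loss_fct(logits, targets)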
@slow
def test_splinter_pretraining_prepare_question_positions(self):
model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass")
input_ids = torch.tensor(
[
[101, 104, 1, 2, 104, 3, 4, 102],
[101, 1, 104, 2, 104, 3, 104, 102],
[101, 1, 2, 104, 104, 3, 4, 102],
[101, 1, 2, 3, 4, 5, 104, 102],
]
)
question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long)
output_without_positions = model(input_ids)
output_with_positions = model(input_ids, question_positions=question_positions)
self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all())
self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
| transformers/tests/models/splinter/test_modeling_splinter.py/0 | {
"file_path": "transformers/tests/models/splinter/test_modeling_splinter.py",
"repo_id": "transformers",
"token_count": 9810
} | 572 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SwiftFormer model."""
import unittest
from transformers import SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__(
self,
parent,
batch_size=13,
num_channels=3,
is_training=True,
use_labels=True,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
image_size=224,
num_labels=3,
layer_depths=[1, 1, 1, 1],
embed_dims=[16, 16, 32, 32],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_labels = num_labels
self.image_size = image_size
self.layer_depths = layer_depths
self.embed_dims = embed_dims
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return SwiftFormerConfig(
depths=self.layer_depths,
embed_dims=self.embed_dims,
mlp_ratio=4,
downsamples=[True, True, True, True],
hidden_act="gelu",
num_labels=self.num_labels,
down_patch_size=3,
down_stride=2,
down_pad=1,
drop_rate=0.0,
drop_path_rate=0.0,
use_layer_scale=True,
layer_scale_init_value=1e-5,
)
def create_and_check_model(self, config, pixel_values, labels):
model = SwiftFormerModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = SwiftFormerForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
model = SwiftFormerForImageClassification(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
(config, pixel_values, labels) = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SwiftFormer does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = SwiftFormerModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=SwiftFormerConfig,
has_text_modality=False,
hidden_size=37,
num_attention_heads=12,
num_hidden_layers=12,
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "MBZUAI/swiftformer-xs"
model = SwiftFormerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="SwiftFormer does not output attentions")
def test_attention_outputs(self):
pass
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_stages = 8
self.assertEqual(len(hidden_states), expected_num_stages) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(hidden_states)):
self.assertEqual(
hidden_states[i].shape,
torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
]
),
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
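    # Added illustration: the expected (height, width) of the i-th hidden
    # state, mirroring the arithmetic asserted above -- the stem downsamples
    # by 4, then the resolution halves after every two blocks.
    def _expected_feature_map_size(self, i):
        side = (self.model_tester.image_size // 4) // 2 ** (i // 2)
        return side, side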
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if name.endswith(".w_g"):
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
| transformers/tests/models/swiftformer/test_modeling_swiftformer.py/0 | {
"file_path": "transformers/tests/models/swiftformer/test_modeling_swiftformer.py",
"repo_id": "transformers",
"token_count": 4504
} | 573 |
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_torchvision, require_vision
from transformers.utils import is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import TimmWrapperConfig, TimmWrapperImageProcessor
@require_torch
@require_vision
@require_torchvision
class TimmWrapperImageProcessingTest(unittest.TestCase):
image_processing_class = TimmWrapperImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.temp_dir = tempfile.TemporaryDirectory()
config = TimmWrapperConfig.from_pretrained("timm/resnet18.a1_in1k")
config.save_pretrained(self.temp_dir.name)
def tearDown(self):
self.temp_dir.cleanup()
def test_load_from_hub(self):
image_processor = TimmWrapperImageProcessor.from_pretrained("timm/resnet18.a1_in1k")
self.assertIsInstance(image_processor, TimmWrapperImageProcessor)
def test_load_from_local_dir(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
self.assertIsInstance(image_processor, TimmWrapperImageProcessor)
def test_image_processor_properties(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
self.assertTrue(hasattr(image_processor, "data_config"))
self.assertTrue(hasattr(image_processor, "val_transforms"))
self.assertTrue(hasattr(image_processor, "train_transforms"))
def test_image_processor_call_numpy(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = np.random.randint(256, size=(256, 256, 3), dtype=np.uint8)
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
def test_image_processor_call_pil(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = Image.fromarray(np.random.randint(256, size=(256, 256, 3), dtype=np.uint8))
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
def test_image_processor_call_tensor(self):
image_processor = TimmWrapperImageProcessor.from_pretrained(self.temp_dir.name)
single_image = torch.from_numpy(np.random.randint(256, size=(3, 256, 256), dtype=np.uint8)).float()
batch_images = [single_image, single_image, single_image]
# single image
pixel_values = image_processor(single_image).pixel_values
self.assertEqual(pixel_values.shape, (1, 3, 224, 224))
# batch images
pixel_values = image_processor(batch_images).pixel_values
self.assertEqual(pixel_values.shape, (3, 3, 224, 224))
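    # Added convenience sketch (not used above): build a random RGB PIL image,
    # mirroring the ad-hoc inputs the call tests construct inline.
    @staticmethod
    def _random_pil_image(height=256, width=256):
        return Image.fromarray(np.random.randint(256, size=(height, width, 3), dtype=np.uint8))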
| transformers/tests/models/timm_wrapper/test_image_processing_timm_wrapper.py/0 | {
"file_path": "transformers/tests/models/timm_wrapper/test_image_processing_timm_wrapper.py",
"repo_id": "transformers",
"token_count": 1470
} | 574 |
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
import unittest
import numpy as np
from parameterized import parameterized
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
if is_torchvision_available():
from transformers import YolosImageProcessorFast
class YolosImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_rescale=True,
rescale_factor=1 / 255,
do_pad=True,
):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to YolosImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
width, height = image.size
elif isinstance(image, np.ndarray):
height, width = image.shape[0], image.shape[1]
else:
height, width = image.shape[1], image.shape[2]
size = self.size["shortest_edge"]
max_size = self.size.get("longest_edge", None)
if max_size is not None:
min_original_size = float(min((height, width)))
max_original_size = float(max((height, width)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if width <= height and width != size:
height = int(size * height / width)
width = size
elif height < width and height != size:
width = int(size * width / height)
height = size
width_mod = width % 16
height_mod = height % 16
expected_width = width - width_mod
expected_height = height - height_mod
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
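    # Added illustration: after the shortest/longest-edge resize, YOLOS snaps
    # each side down to a multiple of 16, which is what the `% 16` arithmetic
    # above computes.
    @staticmethod
    def _snap_to_multiple_of_16(height, width):
        return height - height % 16, width - width % 16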
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = YolosImageProcessor if is_vision_available() else None
fast_image_processing_class = YolosImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = YolosImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad, True)
image_processor = image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
)
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad, False)
def test_equivalence_padding(self):
# Initialize image_processings
image_processing_1 = self.image_processing_class(**self.image_processor_dict)
image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
encoded_images = image_processing_2(image_inputs, return_tensors="pt")
torch.testing.assert_close(
encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], rtol=1e-4, atol=1e-4
)
@parameterized.expand(
[
((3, 100, 1500), 1333, 800),
((3, 400, 400), 1333, 800),
((3, 1500, 1500), 1333, 800),
((3, 800, 1333), 1333, 800),
((3, 1333, 800), 1333, 800),
((3, 800, 800), 400, 400),
]
)
def test_resize_max_size_respected(self, image_size, longest_edge, shortest_edge):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
# create torch tensors as image
image = torch.randint(0, 256, image_size, dtype=torch.uint8)
processed_image = image_processor(
image,
size={"longest_edge": longest_edge, "shortest_edge": shortest_edge},
do_pad=False,
return_tensors="pt",
)["pixel_values"]
shape = list(processed_image.shape[-2:])
max_size, min_size = max(shape), min(shape)
            self.assertTrue(max_size <= longest_edge, f"Expected max_size <= {longest_edge}, got image shape {shape}")
            self.assertTrue(min_size <= shortest_edge, f"Expected min_size <= {shortest_edge}, got image shape {shape}")
@slow
def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class.from_pretrained("hustvl/yolos-small")
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1056])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1056])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1056])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 815161
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1056])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
    # Output size is slightly different from DETR as YOLOS snaps each side to a multiple of 16
@slow
def test_batched_coco_detection_annotations(self):
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
annotations_0 = {"image_id": 39769, "annotations": target}
annotations_1 = {"image_id": 39769, "annotations": target}
# Adjust the bounding boxes for the resized image
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotations_1["annotations"])):
coords = annotations_1["annotations"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotations_1["annotations"][i]["bbox"] = new_bbox
images = [image_0, image_1]
annotations = [annotations_0, annotations_1]
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class()
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
return_tensors="pt", # do_convert_annotations=True
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1056
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.6879, 0.4609, 0.0755, 0.3691],
[0.2118, 0.3359, 0.2601, 0.1566],
[0.5011, 0.5000, 0.9979, 1.0000],
[0.5010, 0.5020, 0.9979, 0.9959],
[0.3284, 0.5944, 0.5884, 0.8112],
[0.8394, 0.5445, 0.3213, 0.9110],
]
)
expected_boxes_1 = torch.tensor(
[
[0.4169, 0.2765, 0.0458, 0.2215],
[0.1284, 0.2016, 0.1576, 0.0940],
[0.3792, 0.4933, 0.7559, 0.9865],
[0.3794, 0.5002, 0.7563, 0.9955],
[0.1990, 0.5456, 0.3566, 0.8646],
[0.5845, 0.4115, 0.3462, 0.7161],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))
            # Check that if do_convert_annotations=False, the annotations are not converted to the
            # (centre_x, centre_y, width, height) format and are not normalized to the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1, atol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1, atol=1)
    # Output size is slightly different from DETR, as YOLOS pads to a multiple of 16
def test_batched_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotation_1["segments_info"])):
coords = annotation_1["segments_info"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotation_1["segments_info"][i]["bbox"] = new_bbox
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
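        # Directory holding the panoptic segmentation PNG masks referenced by `file_name`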
images = [image_0, image_1]
annotations = [annotation_0, annotation_1]
# encode them
for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_tensors="pt",
return_segmentation_masks=True,
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1056
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.2625, 0.5437, 0.4688, 0.8625],
[0.7719, 0.4104, 0.4531, 0.7125],
[0.5000, 0.4927, 0.9969, 0.9854],
[0.1688, 0.2000, 0.2063, 0.0917],
[0.5492, 0.2760, 0.0578, 0.2187],
[0.4992, 0.4990, 0.9984, 0.9979],
]
)
expected_boxes_1 = torch.tensor(
[
[0.1591, 0.3262, 0.2841, 0.5175],
[0.4678, 0.2463, 0.2746, 0.4275],
[0.3030, 0.2956, 0.6042, 0.5913],
[0.1023, 0.1200, 0.1250, 0.0550],
[0.3329, 0.1656, 0.0350, 0.1312],
[0.3026, 0.2994, 0.6051, 0.5987],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3, atol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))
            # Check that with do_convert_annotations=False the annotations are not converted to the
            # (centre_x, centre_y, width, height) format and are not normalized to the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->Yolos
def test_max_width_max_height_resizing_and_pad_strategy(self):
for image_processing_class in self.image_processor_list:
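            # 200 (height) x 100 (width) x 3 (channels) uint8 image in channels-last (HWC) layout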
image_1 = torch.ones([200, 100, 3], dtype=torch.uint8)
# do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50]))
# do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=False,
)
            inputs = image_processor(images=[image_1], return_tensors="pt")
            self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 200, 100]))
# do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100}
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100]))
            # do_pad=True, max_height=300, max_width=100, pad_size=301x101, image=200x100 -> 301x101
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=True,
pad_size={"height": 301, "width": 101},
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101]))
### Check for batch
image_2 = torch.ones([100, 150, 3], dtype=torch.uint8)
# do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100
image_processor = image_processing_class(
size={"max_height": 150, "max_width": 100},
do_pad=True,
pad_size={"height": 150, "width": 100},
)
inputs = image_processor(images=[image_1, image_2], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100]))
| transformers/tests/models/yolos/test_image_processing_yolos.py/0 | {
"file_path": "transformers/tests/models/yolos/test_image_processing_yolos.py",
"repo_id": "transformers",
"token_count": 14287
} | 575 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
AutoModelForTableQuestionAnswering,
AutoTokenizer,
TableQuestionAnsweringPipeline,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_torch,
slow,
)
@is_pipeline_test
class TQAPipelineTests(unittest.TestCase):
# Putting it there for consistency, but TQA do not have fast tokenizer
# which are needed to generate automatic tests
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
@require_torch
def test_small_model_pt(self, dtype="float32"):
model_id = "lysandre/tiny-tapas-random-wtq"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id, dtype=dtype)
tokenizer = AutoTokenizer.from_pretrained(model_id)
self.assertIsInstance(model.config.aggregation_labels, dict)
self.assertIsInstance(model.config.no_aggregation_label_index, int)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_torch
def test_small_model_pt_fp16(self):
self.test_small_model_pt(dtype="float16")
@require_torch
def test_slow_tokenizer_sqa_pt(self, dtype="float32"):
model_id = "lysandre/tiny-tapas-random-sqa"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id, dtype=dtype)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
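        # In sequential (SQA-style) decoding each query is conditioned on the previous answers,
        # so later answers may diverge from the independent batched mode.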
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most"
" active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@require_torch
def test_slow_tokenizer_sqa_pt_fp16(self):
self.test_slow_tokenizer_sqa_pt(dtype="float16")
@slow
@require_torch
def test_integration_wtq_pt(self, dtype="float32"):
table_querier = pipeline("table-question-answering", dtype=dtype)
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
queries = [
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
]
results = table_querier(data, queries)
expected_results = [
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
{
"answer": "COUNT > Transformers, Datasets, Tokenizers",
"coordinates": [(0, 0), (1, 0), (2, 0)],
"cells": ["Transformers", "Datasets", "Tokenizers"],
"aggregator": "COUNT",
},
{
"answer": "AVERAGE > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "AVERAGE",
},
{
"answer": "SUM > 36542, 4512, 3934",
"coordinates": [(0, 1), (1, 1), (2, 1)],
"cells": ["36542", "4512", "3934"],
"aggregator": "SUM",
},
]
self.assertListEqual(results, expected_results)
@slow
@require_torch
def test_integration_wtq_pt_fp16(self):
self.test_integration_wtq_pt(dtype="float16")
@slow
@require_torch
def test_integration_sqa_pt(self, dtype="float32"):
table_querier = pipeline(
"table-question-answering",
model="google/tapas-base-finetuned-sqa",
tokenizer="google/tapas-base-finetuned-sqa",
dtype=dtype,
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
{"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
{"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
]
self.assertListEqual(results, expected_results)
@slow
@require_torch
def test_integration_sqa_pt_fp16(self):
self.test_integration_sqa_pt(dtype="float16")
@slow
@require_torch
def test_large_model_pt_tapex(self, dtype="float32"):
model_id = "microsoft/tapex-large-finetuned-wtq"
table_querier = pipeline(
"table-question-answering",
model=model_id,
dtype=dtype,
)
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}
queries = [
"How many movies has George Clooney played in?",
"How old is Mr Clooney ?",
"What's the date of birth of Leonardo ?",
]
results = table_querier(data, queries, sequential=True)
expected_results = [
{"answer": " 69"},
{"answer": " 59"},
{"answer": " 10 june 1996"},
]
self.assertListEqual(results, expected_results)
| transformers/tests/pipelines/test_pipelines_table_question_answering.py/0 | {
"file_path": "transformers/tests/pipelines/test_pipelines_table_question_answering.py",
"repo_id": "transformers",
"token_count": 7888
} | 576 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AwqConfig, OPTForCausalLM
from transformers.testing_utils import (
backend_empty_cache,
get_device_properties,
require_accelerate,
require_auto_awq,
require_flash_attn,
require_intel_extension_for_pytorch,
require_torch_accelerator,
require_torch_gpu,
require_torch_multi_accelerator,
require_torch_multi_gpu,
slow,
torch_device,
)
from transformers.utils import is_accelerate_available, is_torch_available
if is_torch_available():
import torch
if is_accelerate_available():
from accelerate import init_empty_weights
@require_torch_accelerator
class AwqConfigTest(unittest.TestCase):
def test_wrong_backend(self):
"""
        Simple test that checks that an error is raised when a user passes a wrong backend
"""
# This should work fine
_ = AwqConfig(bits=4)
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="")
# These should work fine
_ = AwqConfig(bits=4, version="GEMM")
_ = AwqConfig(bits=4, version="gemm")
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="unexisting-backend")
        # llm-awq is only supported on CUDA devices with compute capability >= 8.0 and on XPU devices
        device_type, major, _ = get_device_properties()
        support_llm_awq = (device_type == "cuda" and major >= 8) or device_type == "xpu"
if support_llm_awq:
# LLMAWQ should work on an A100
AwqConfig(bits=4, backend="llm-awq")
else:
# LLMAWQ does not work on a T4
with self.assertRaises(ValueError):
AwqConfig(bits=4, backend="llm-awq")
def test_to_dict(self):
"""
        Simple test that checks that converting a config to a dict preserves every attribute
"""
quantization_config = AwqConfig(bits=4)
config_to_dict = quantization_config.to_dict()
for key in config_to_dict:
self.assertEqual(getattr(quantization_config, key), config_to_dict[key])
def test_from_dict(self):
"""
        Simple test that checks that a config object built from a dict matches the dict's values
"""
dict = {"bits": 2, "zero_point": False, "backend": "autoawq"}
quantization_config = AwqConfig.from_dict(dict)
self.assertEqual(dict["bits"], quantization_config.bits)
self.assertEqual(dict["zero_point"], quantization_config.zero_point)
self.assertEqual(dict["backend"], quantization_config.backend)
@slow
@require_torch_accelerator
@require_auto_awq
@require_accelerate
class AwqTest(unittest.TestCase):
model_name = "TheBloke/Mistral-7B-v0.1-AWQ"
dummy_transformers_model_name = "bigscience/bloom-560m"
model_with_no_k_proj_quantized = "hf-internal-testing/opt-125m-awq-no-k-proj"
input_text = "Hello my name is"
EXPECTED_OUTPUT = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish"
EXPECTED_OUTPUT_BF16 = "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish"
EXPECTED_OUTPUT_EXLLAMA = [
"Hello my name is Katie and I am a 20 year old student from the UK. I am currently studying for a degree in English Literature and History at the University of York. I am a very out",
"Hello my name is Katie and I am a 20 year old student from the UK. I am currently studying for a degree in English Literature and History at the University of York. I am a very creative",
]
device_map = torch_device
# called only once for all test in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(cls.model_name, device_map=cls.device_map)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV
from transformers.integrations.awq import replace_with_awq_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = AwqConfig(bits=4)
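        # init_empty_weights materializes the model on the meta device, so no real weights are
        # allocated while we count and replace the Linear layers.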
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_awq_linear(model, quantization_config=quantization_config)
nb_awq_linear = 0
for module in model.modules():
if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)):
nb_awq_linear += 1
self.assertEqual(nb_linears, nb_awq_linear)
        # Try with `modules_to_not_convert`
with init_empty_weights():
model = OPTForCausalLM(config)
model, _ = replace_with_awq_linear(
model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"]
)
nb_awq_linear = 0
for module in model.modules():
if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)):
nb_awq_linear += 1
self.assertEqual(nb_linears - 1, nb_awq_linear)
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=40)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_raise_if_non_quantized(self):
model_id = "facebook/opt-125m"
quantization_config = AwqConfig(bits=4)
with self.assertRaises(ValueError):
_ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
def test_quantized_model_bf16(self):
"""
Simple test that checks if the quantized model is working properly with bf16
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, dtype=torch.bfloat16).to(torch_device)
output = quantized_model.generate(**input_ids, max_new_tokens=40)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16)
@require_torch_gpu
def test_quantized_model_exllama(self):
"""
Simple test that checks if the quantized model is working properly with exllama backend
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = AwqConfig(version="exllama")
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=quantization_config, device_map=torch_device
)
output = quantized_model.generate(**input_ids, max_new_tokens=40)
self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_EXLLAMA)
def test_quantized_model_no_device_map(self):
"""
        Simple test that checks that the quantized model works when loaded without a device map
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device)
output = quantized_model.generate(**input_ids, max_new_tokens=40)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=40)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_accelerator
def test_quantized_model_multi_accelerator(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=40)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_quantized_model_no_k_proj_quantized(self):
"""
        Simple test that checks that a checkpoint whose k_proj was left un-quantized loads and generates correctly
"""
dummy_input = torch.LongTensor([[0, 1, 0]]).to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_with_no_k_proj_quantized).to(torch_device)
        self.assertIsInstance(quantized_model.model.decoder.layers[0].self_attn.k_proj, torch.nn.Linear)
        self.assertNotIsInstance(quantized_model.model.decoder.layers[0].self_attn.v_proj, torch.nn.Linear)
EXPECTED_OUTPUT = torch.LongTensor([[0, 1, 0, 50118, 50118, 133, 248, 12, 134, 16, 10, 372, 2031]]).to(
torch_device
)
output = quantized_model.generate(dummy_input, max_new_tokens=10)
self.assertTrue((EXPECTED_OUTPUT == output).all())
@slow
@require_torch_accelerator
@require_auto_awq
@require_accelerate
class AwqFusedTest(unittest.TestCase):
model_name = "TheBloke/Mistral-7B-OpenOrca-AWQ"
model_revision = "7048b2af77d0dd1c81b000b19d73f9cc8950b510"
custom_mapping_model_id = "TheBloke/Mistral-7B-v0.1-AWQ"
custom_model_revision = "f186bcfa9edbe2a4334262ec1e67f23e53ed1ae7"
mixtral_model_name = "casperhansen/mixtral-instruct-awq"
mixtral_model_revision = "87dd4ec502dde74fb3a624835c776b000d190c3b"
multi_modal_model_name = "ybelkada/llava-1.5-7b-hf-awq"
multi_modal_model_code_revision = "ad108a50f5b9e681bdd7378409f57b7fa59a7442"
prompt = (
"You're standing on the surface of the Earth. "
"You walk one mile south, one mile west and one mile north. "
"You end up exactly where you started. Where are you?"
)
EXPECTED_GENERATION = prompt + "\n\nYou're at the center of a square."
EXPECTED_GENERATION_CUSTOM_MODEL = "Hello,\n\nI have a problem with my 20"
EXPECTED_GENERATION_MIXTRAL = prompt + " You're on the North Pole.\n\nThe"
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def _check_fused_modules(self, model):
has_fused_modules = False
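        # Class names of the fused modules that autoawq injects when do_fuse=True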
fused_modules_name = ["QuantAttentionFused", "QuantFusedMLP", "FasterTransformerRMSNorm"]
for _, module in model.named_modules():
if module.__class__.__name__ in fused_modules_name:
has_fused_modules = True
break
self.assertTrue(has_fused_modules, "Modules fusing not performed correctly!")
def test_raise_save_pretrained(self):
"""
Test that `save_pretrained` is effectively blocked for fused models
"""
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True)
model = AutoModelForCausalLM.from_pretrained(
self.model_name,
quantization_config=quantization_config,
revision=self.model_revision,
).to(torch_device)
self._check_fused_modules(model)
with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
def test_fused_modules_to_not_convert(self):
"""
        Test that fusing combined with `modules_to_not_convert` works as expected
"""
model_id = "hf-internal-testing/Mixtral-tiny-AWQ"
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True)
model = AutoModelForCausalLM.from_pretrained(
model_id,
quantization_config=quantization_config,
).to(torch_device)
# Check if model has been correctly fused
self._check_fused_modules(model)
# Checks if the modules_to_not_convert (here gate layer) is a Linear
self.assertTrue(isinstance(model.model.layers[0].block_sparse_moe.gate, torch.nn.Linear))
@unittest.skipIf(
get_device_properties()[0] == "cuda" and get_device_properties()[1] < 8,
"Skipping because RuntimeError: FlashAttention only supports Ampere GPUs or newer, so not supported on GPU with capability < 8.0",
)
@require_flash_attn
@require_torch_gpu
def test_generation_fused(self):
"""
Test generation quality for fused models - single batch case
"""
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True)
model = AutoModelForCausalLM.from_pretrained(
self.model_name,
quantization_config=quantization_config,
revision=self.model_revision,
).to(torch_device)
self._check_fused_modules(model)
tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision)
inputs = tokenizer(self.prompt, return_tensors="pt").to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=12)
self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION)
@require_flash_attn
@require_torch_gpu
@unittest.skipIf(
get_device_properties()[0] == "cuda" and get_device_properties()[1] < 8,
"Skipping because RuntimeError: FlashAttention only supports Ampere GPUs or newer, so not supported on GPU with capability < 8.0",
)
def test_generation_fused_batched(self):
"""
Test generation quality for fused models - multi batch case
"""
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=128, do_fuse=True)
model = AutoModelForCausalLM.from_pretrained(
self.model_name,
quantization_config=quantization_config,
revision=self.model_revision,
).to(torch_device)
self._check_fused_modules(model)
tokenizer = AutoTokenizer.from_pretrained(self.model_name, revision=self.model_revision)
tokenizer.pad_token_id = tokenizer.eos_token_id
inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=12)
self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION)
def test_generation_llava_fused(self):
from transformers import pipeline
quantization_config = AwqConfig(do_fuse=True, fuse_max_seq_len=2048)
pipe = pipeline(
"image-to-text",
model=self.multi_modal_model_name,
device=0,
model_kwargs={
"quantization_config": quantization_config,
},
revision=self.multi_modal_model_code_revision,
)
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
prompt = "USER: <image>\nCan you please describe this image?\nASSISTANT:"
outputs = pipe(url, prompt=prompt, generate_kwargs={"max_new_tokens": 100})
EXPECTED_OUTPUT = "USER: \nCan you please describe this image?\nASSISTANT: The image features a brown and white cat sitting on a green surface, possibly a carpet or a grassy area. The cat is holding a red ball in its paws, seemingly playing with it. The cat appears to be focused on the ball, possibly preparing to play or just enjoying the toy."
self.assertEqual(outputs[0]["generated_text"], EXPECTED_OUTPUT)
@require_flash_attn
@require_torch_multi_gpu
@unittest.skipIf(
get_device_properties()[0] == "cuda" and get_device_properties()[1] < 8,
"Skipping because RuntimeError: FlashAttention only supports Ampere GPUs or newer, so not supported on GPU with capability < 8.0",
)
def test_generation_custom_model(self):
"""
Test generation quality for fused models using custom fused map.
"""
quantization_config = AwqConfig(
bits=4,
fuse_max_seq_len=512,
modules_to_fuse={
"attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
"mlp": ["gate_proj", "up_proj", "down_proj"],
"layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
"use_alibi": False,
"hidden_size": 4096,
"num_attention_heads": 32,
"num_key_value_heads": 8,
},
)
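        # The mapping above follows Mistral's module names; hidden_size and the head counts
        # must match the checkpoint's config for fusing to work.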
model = AutoModelForCausalLM.from_pretrained(
self.custom_mapping_model_id,
quantization_config=quantization_config,
device_map="balanced",
revision=self.custom_model_revision,
)
self._check_fused_modules(model)
tokenizer = AutoTokenizer.from_pretrained(self.custom_mapping_model_id, revision=self.custom_model_revision)
prompt = "Hello"
inputs = tokenizer(prompt, return_tensors="pt").to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=12)
self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_CUSTOM_MODEL)
@require_flash_attn
@require_torch_multi_gpu
@unittest.skip(reason="Not enough GPU memory on CI runners")
def test_generation_mixtral_fused(self):
"""
Text generation test for Mixtral + AWQ + fused
"""
quantization_config = AwqConfig(bits=4, fuse_max_seq_len=1024, do_fuse=True)
model = AutoModelForCausalLM.from_pretrained(
self.mixtral_model_name,
quantization_config=quantization_config,
device_map="auto",
revision=self.mixtral_model_revision,
)
tokenizer = AutoTokenizer.from_pretrained(self.mixtral_model_name)
tokenizer.pad_token = tokenizer.eos_token
inputs = tokenizer([self.prompt, self.prompt], return_tensors="pt", padding=True).to(torch_device)
outputs = model.generate(**inputs, max_new_tokens=12)
self.assertEqual(tokenizer.decode(outputs[0], skip_special_tokens=True), self.EXPECTED_GENERATION_MIXTRAL)
@slow
@require_torch_accelerator
@require_auto_awq
@require_accelerate
class AwqScaleTest(unittest.TestCase):
model_name = "TechxGenus/starcoder2-3b-AWQ"
def test_load_quantized_model(self):
        """
        Simple test that checks if the scales have been replaced in the quantized model
        """
        from awq.modules.act import ScaledActivation
quantized_model = AutoModelForCausalLM.from_pretrained(
"TechxGenus/starcoder2-3b-AWQ", dtype=torch.float16, device_map=torch_device
)
self.assertTrue(isinstance(quantized_model.model.layers[0].mlp.act, ScaledActivation))
@slow
@require_auto_awq
@require_accelerate
@require_intel_extension_for_pytorch
class AwqIPEXTest(unittest.TestCase):
def test_quantized_model_ipex(self):
"""
Simple test that checks if the quantized model is working properly with ipex backend
"""
quantization_config = AwqConfig(version="ipex")
model = AutoModelForCausalLM.from_pretrained(
"TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ",
quantization_config=quantization_config,
device_map="cpu",
)
tokenizer = AutoTokenizer.from_pretrained("TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ")
input_ids = tokenizer.encode("How to make a cake", return_tensors="pt")
pad_token_id = tokenizer.eos_token_id
output = model.generate(input_ids, do_sample=False, max_length=20, pad_token_id=pad_token_id)
expected_output = (
"How to make a cake with a round tin?\nHow to make a cake with a round tin?\n1. Preheat the oven to 180°"
)
self.assertIn(tokenizer.decode(output[0], skip_special_tokens=True), expected_output)
| transformers/tests/quantization/autoawq/test_awq.py/0 | {
"file_path": "transformers/tests/quantization/autoawq/test_awq.py",
"repo_id": "transformers",
"token_count": 9308
} | 577 |
# Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, GenerationConfig, QuarkConfig
from transformers.testing_utils import (
cleanup,
is_torch_available,
require_accelerate,
require_quark,
require_torch_gpu,
require_torch_multi_gpu,
slow,
torch_device,
)
from transformers.utils.import_utils import is_quark_available
if is_torch_available():
import torch
if is_quark_available():
from quark.torch.export.nn.modules.qparamslinear import QParamsLinear
@require_quark
class QuarkConfigTest(unittest.TestCase):
    def test_common_args(self):
config = AutoConfig.from_pretrained("amd/Llama-3.1-8B-Instruct-w-int8-a-int8-sym-test")
QuarkConfig(**config.quantization_config)
@slow
@require_quark
@require_torch_gpu
class QuarkTest(unittest.TestCase):
reference_model_name = "unsloth/Meta-Llama-3.1-8B-Instruct"
quantized_model_name = "amd/Llama-3.1-8B-Instruct-w-int8-a-int8-sym-test"
input_text = "Today I am in Paris and"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Today I am in Paris and I am not in Paris, France\nToday I am in Paris, Illinois")
EXPECTED_OUTPUTS.add("Today I am in Paris and I am enjoying the city of light. I am not just any ordinary Paris")
EXPECTED_OUTPUTS.add("Today I am in Paris and I am enjoying my day off! The sun is shining, the birds are")
EXPECTED_OUTPUTS.add("Today I am in Paris and I'm here to tell you about it. It's a beautiful day,")
EXPECTED_OUTPUTS.add("Today I am in Paris and I am not in Paris at all! I am not in Paris, but")
EXPECTED_RELATIVE_DIFFERENCE = 1.66
device_map = None
@classmethod
def setUpClass(cls):
"""
Setup reference & quantized model
"""
cls.model_fp16 = AutoModelForCausalLM.from_pretrained(
cls.reference_model_name, dtype=torch.float16, device_map=cls.device_map
)
cls.mem_fp16 = cls.model_fp16.get_memory_footprint()
cls.tokenizer = AutoTokenizer.from_pretrained(cls.reference_model_name, use_fast=True)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.quantized_model_name,
dtype=torch.float16,
device_map=cls.device_map,
)
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the accelerator memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
cleanup(torch_device, gc_collect=True)
def test_memory_footprint(self):
mem_quantized = self.quantized_model.get_memory_footprint()
self.assertTrue(self.mem_fp16 / mem_quantized > self.EXPECTED_RELATIVE_DIFFERENCE)
def test_device_and_dtype_assignment(self):
r"""
        Test that trying to cast (or assign a device to) a model after quantization raises an error.
        Also checks that non-quantized models are cast correctly.
"""
# This should work
if self.device_map is None:
_ = self.quantized_model.to(0)
with self.assertRaises(ValueError):
# Tries with a `dtype``
self.quantized_model.to(torch.float16)
def test_original_dtype(self):
r"""
A simple test to check if the model successfully stores the original dtype
"""
self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16)
self.assertTrue(isinstance(self.quantized_model.model.layers[0].mlp.gate_proj, QParamsLinear))
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
        Since we are operating on small numbers and the test model is relatively small, we might not get
        the same output across GPUs, so we generate only a few tokens and compare them against a set of
        accepted outputs.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
gen_config = GenerationConfig(
max_new_tokens=15,
min_new_tokens=15,
use_cache=True,
num_beams=1,
do_sample=False,
)
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), generation_config=gen_config)
# Get the generation
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
def test_generate_quality(self):
"""
Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
if self.device_map is None:
self.check_inference_correctness(self.quantized_model.to(0))
else:
self.check_inference_correctness(self.quantized_model)
@require_accelerate
@require_torch_multi_gpu
@require_quark
class QuarkTestDeviceMap(QuarkTest):
device_map = "auto"
| transformers/tests/quantization/quark_integration/test_quark.py/0 | {
"file_path": "transformers/tests/quantization/quark_integration/test_quark.py",
"repo_id": "transformers",
"token_count": 2324
} | 578 |
import json
import logging
import os
import subprocess
from argparse import ArgumentParser
logger = logging.getLogger(__name__)
def parse_args():
parser = ArgumentParser()
parsed, unknown = parser.parse_known_args()
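    # First pass collects the unknown args so that arbitrary SageMaker hyperparameters can be
    # registered dynamically and parsed for real in the second pass.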
for arg in unknown:
if arg.startswith(("-", "--")):
parser.add_argument(arg.split("=")[0])
return parser.parse_args()
def main():
args = parse_args()
port = 8888
num_gpus = int(os.environ["SM_NUM_GPUS"])
hosts = json.loads(os.environ["SM_HOSTS"])
num_nodes = len(hosts)
current_host = os.environ["SM_CURRENT_HOST"]
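    # The node rank is this host's index in the SM_HOSTS list provided by SageMaker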
rank = hosts.index(current_host)
os.environ["NCCL_DEBUG"] = "INFO"
if num_nodes > 1:
cmd = f"""python -m torch.distributed.launch \
--nnodes={num_nodes} \
--node_rank={rank} \
--nproc_per_node={num_gpus} \
--master_addr={hosts[0]} \
--master_port={port} \
./run_glue.py \
{"".join([f" --{parameter} {value}" for parameter, value in args.__dict__.items()])}"""
else:
cmd = f"""python -m torch.distributed.launch \
--nproc_per_node={num_gpus} \
./run_glue.py \
{"".join([f" --{parameter} {value}" for parameter, value in args.__dict__.items()])}"""
try:
subprocess.run(cmd, shell=True)
except Exception as e:
logger.info(e)
if __name__ == "__main__":
main()
| transformers/tests/sagemaker/scripts/pytorch/run_ddp.py/0 | {
"file_path": "transformers/tests/sagemaker/scripts/pytorch/run_ddp.py",
"repo_id": "transformers",
"token_count": 694
} | 579 |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import random
import tempfile
from pathlib import Path
from typing import Optional
import numpy as np
from huggingface_hub import hf_hub_download
from parameterized import parameterized
from transformers.models.auto.processing_auto import processor_class_from_name
from transformers.processing_utils import Unpack
from transformers.testing_utils import (
check_json_file_has_correct_format,
require_av,
require_librosa,
require_torch,
require_vision,
)
from transformers.utils import is_av_available, is_torch_available, is_vision_available
global_rng = random.Random()
if is_vision_available():
from PIL import Image
if is_torch_available():
import torch
MODALITY_INPUT_DATA = {
"images": [
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
],
"videos": [
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4",
["https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg"],
],
"audio": [
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3",
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav",
],
}
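# Per-modality example inputs in the formats processors accept: URLs, lists of image URLs
# standing in for video frames, and (appended below) a decoded numpy video.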
if is_av_available():
from transformers.video_utils import load_video
# load a video file in memory for testing
video, _ = load_video(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4"
)
MODALITY_INPUT_DATA["videos"].append(video)
def prepare_image_inputs():
"""This function prepares a list of PIL images"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
# Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list
def floats_list(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_vision
class ProcessorTesterMixin:
processor_class = None
text_input_name = "input_ids"
images_input_name = "pixel_values"
videos_input_name = "pixel_values_videos"
audio_input_name = "input_features"
@staticmethod
def prepare_processor_dict():
return {}
def get_component(self, attribute, **kwargs):
assert attribute in self.processor_class.attributes
component_class_name = getattr(self.processor_class, f"{attribute}_class")
if isinstance(component_class_name, tuple):
if attribute == "image_processor":
# TODO: @yoni, change logic in v4.52 (when use_fast set to True by default)
component_class_name = component_class_name[0]
else:
component_class_name = component_class_name[-1]
component_class = processor_class_from_name(component_class_name)
component = component_class.from_pretrained(self.tmpdirname, **kwargs) # noqa
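        # Some tiny test tokenizers ship without a pad token; set one so that padding-related
        # kwargs can be exercised uniformly across processors.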
if "tokenizer" in attribute and not component.pad_token:
component.pad_token = "[TEST_PAD]"
if component.pad_token_id is None:
component.pad_token_id = 0
return component
def prepare_components(self):
components = {}
for attribute in self.processor_class.attributes:
component = self.get_component(attribute)
components[attribute] = component
return components
def get_processor(self):
components = self.prepare_components()
processor = self.processor_class(**components, **self.prepare_processor_dict())
return processor
def prepare_text_inputs(self, batch_size: Optional[int] = None, modality: Optional[str] = None):
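        """Builds text prompts that embed the special token of the requested modality."""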
if modality is not None:
special_token_to_add = getattr(self, f"{modality}_token", "")
else:
special_token_to_add = ""
if batch_size is None:
return f"lower newer {special_token_to_add}"
if batch_size < 1:
raise ValueError("batch_size must be greater than 0")
if batch_size == 1:
return [f"lower newer {special_token_to_add}"]
return [f"lower newer {special_token_to_add}", f" {special_token_to_add} upper older longer string"] + [
f"lower newer {special_token_to_add}"
] * (batch_size - 2)
@require_vision
def prepare_image_inputs(self, batch_size: Optional[int] = None):
"""This function prepares a list of PIL images for testing"""
if batch_size is None:
return prepare_image_inputs()[0]
if batch_size < 1:
raise ValueError("batch_size must be greater than 0")
return prepare_image_inputs() * batch_size
@require_vision
def prepare_video_inputs(self, batch_size: Optional[int] = None):
"""This function prepares a list of numpy videos."""
video_input = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] * 8
if batch_size is None:
return video_input
return [video_input] * batch_size
def prepare_audio_inputs(self, batch_size: Optional[int] = None):
"""This function prepares a list of numpy audio."""
raw_speech = floats_list((1, 1000))
raw_speech = [np.asarray(audio) for audio in raw_speech]
if batch_size is None:
return raw_speech
return raw_speech * batch_size
def test_processor_to_json_string(self):
processor = self.get_processor()
obj = json.loads(processor.to_json_string())
for key, value in self.prepare_processor_dict().items():
# Chat template is saved as a separate file
if key not in "chat_template":
# json converts dict keys to str, but some processors force convert back to int when init
if (
isinstance(obj[key], dict)
and isinstance(list(obj[key].keys())[0], str)
and isinstance(list(value.keys())[0], int)
):
obj[key] = {int(k): v for k, v in obj[key].items()}
self.assertEqual(obj[key], value)
self.assertEqual(getattr(processor, key, None), value)
def test_processor_from_and_save_pretrained(self):
processor_first = self.get_processor()
with tempfile.TemporaryDirectory() as tmpdirname:
saved_files = processor_first.save_pretrained(tmpdirname)
if len(saved_files) > 0:
check_json_file_has_correct_format(saved_files[0])
processor_second = self.processor_class.from_pretrained(tmpdirname)
self.assertEqual(processor_second.to_dict(), processor_first.to_dict())
for attribute in processor_first.attributes:
attribute_first = getattr(processor_first, attribute)
attribute_second = getattr(processor_second, attribute)
# tokenizer repr contains model-path from where we loaded
if "tokenizer" not in attribute:
self.assertEqual(repr(attribute_first), repr(attribute_second))
def test_model_input_names(self):
processor = self.get_processor()
text = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
video_inputs = self.prepare_video_inputs()
audio_inputs = self.prepare_audio_inputs()
inputs_dict = {"text": text, "images": image_input, "videos": video_inputs, "audio": audio_inputs}
call_signature = inspect.signature(processor.__call__)
input_args = [param.name for param in call_signature.parameters.values()]
inputs_dict = {k: v for k, v in inputs_dict.items() if k in input_args}
inputs = processor(**inputs_dict, return_tensors="pt")
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
# These kwargs-related tests ensure that processors are correctly instantiated.
# they need to be applied only if an image_processor exists.
def skip_processor_without_typed_kwargs(self, processor):
# TODO this signature check is to test only uniformized processors.
# Once all are updated, remove it.
is_kwargs_typed_dict = False
call_signature = inspect.signature(processor.__call__)
for param in call_signature.parameters.values():
if param.kind == param.VAR_KEYWORD and param.annotation != param.empty:
is_kwargs_typed_dict = (
hasattr(param.annotation, "__origin__") and param.annotation.__origin__ == Unpack
)
if not is_kwargs_typed_dict:
self.skipTest(f"{self.processor_class} doesn't have typed kwargs.")
def test_tokenizer_defaults_preserved_by_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, return_tensors="pt")
self.assertEqual(inputs[self.text_input_name].shape[-1], 117)
def test_image_processor_defaults_preserved_by_image_kwargs(self):
"""
We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor.
We then check that the mean of the pixel_values is less than or equal to 0 after processing.
Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
"""
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=-1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_kwargs_overrides_default_tokenizer_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
inputs = processor(
text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length"
)
self.assertEqual(inputs[self.text_input_name].shape[-1], 112)
def test_kwargs_overrides_default_image_processor_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["image_processor"] = self.get_component(
"image_processor", do_rescale=True, rescale_factor=1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt")
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
def test_unstructured_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1,
padding="max_length",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
def test_unstructured_kwargs_batched(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modality="image")
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1,
padding="longest",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertTrue(
len(inputs[self.text_input_name][0]) == len(inputs[self.text_input_name][1])
and len(inputs[self.text_input_name][1]) < 76
)
def test_doubly_passed_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = [self.prepare_text_inputs(modality="image")]
image_input = self.prepare_image_inputs()
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
images_kwargs={"do_rescale": True, "rescale_factor": -1},
do_rescale=True,
return_tensors="pt",
)
def test_args_overlap_kwargs(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_first = self.get_processor()
image_processor = processor_first.image_processor
image_processor.is_override = True
with tempfile.TemporaryDirectory() as tmpdirname:
processor_first.save_pretrained(tmpdirname)
processor_second = self.processor_class.from_pretrained(tmpdirname, image_processor=image_processor)
self.assertTrue(processor_second.image_processor.is_override)
def test_structured_kwargs_nested(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
def test_structured_kwargs_nested_from_dict(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"do_rescale": True, "rescale_factor": -1},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 76)
# text + audio kwargs testing
@require_torch
def test_tokenizer_defaults_preserved_by_kwargs_audio(self):
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
feature_extractor = self.get_component("feature_extractor")
tokenizer = self.get_component("tokenizer", max_length=300, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(tokenizer=tokenizer, feature_extractor=feature_extractor, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
raw_speech = self.prepare_audio_inputs(batch_size=3)
inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt")
self.assertEqual(len(inputs[self.text_input_name][0]), 300)
@require_torch
def test_kwargs_overrides_default_tokenizer_kwargs_audio(self):
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
feature_extractor = self.get_component("feature_extractor")
tokenizer = self.get_component("tokenizer", max_length=117)
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(tokenizer=tokenizer, feature_extractor=feature_extractor, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
raw_speech = self.prepare_audio_inputs(batch_size=3)
inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt", max_length=300, padding="max_length")
self.assertEqual(len(inputs[self.text_input_name][0]), 300)
@require_torch
def test_unstructured_kwargs_audio(self):
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
feature_extractor = self.get_component("feature_extractor")
tokenizer = self.get_component("tokenizer")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(tokenizer=tokenizer, feature_extractor=feature_extractor, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
raw_speech = self.prepare_audio_inputs(batch_size=3)
inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt", max_length=300, padding="max_length")
self.assertEqual(len(inputs[self.text_input_name][0]), 300)
@require_torch
def test_doubly_passed_kwargs_audio(self):
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
feature_extractor = self.get_component("feature_extractor")
tokenizer = self.get_component("tokenizer")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(tokenizer=tokenizer, feature_extractor=feature_extractor, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
raw_speech = self.prepare_audio_inputs(batch_size=3)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
audio=raw_speech,
text_kwargs={"padding": "max_length"},
padding="max_length",
)
@require_torch
@require_vision
def test_structured_kwargs_audio_nested(self):
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
feature_extractor = self.get_component("feature_extractor")
tokenizer = self.get_component("tokenizer", max_length=117)
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(tokenizer=tokenizer, feature_extractor=feature_extractor, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
raw_speech = self.prepare_audio_inputs(batch_size=3)
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"text_kwargs": {"padding": "max_length", "max_length": 76},
"audio_kwargs": {"padding": "max_length", "max_length": 300},
}
inputs = processor(text=input_str, audio=raw_speech, **all_kwargs)
self.assertEqual(len(inputs[self.text_input_name][0]), 76)
def test_tokenizer_defaults_preserved_by_kwargs_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=167, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
inputs = processor(text=input_str, videos=video_input, do_sample_frames=False, return_tensors="pt")
self.assertEqual(inputs[self.text_input_name].shape[-1], 167)
def test_video_processor_defaults_preserved_by_video_kwargs(self):
"""
        We use do_rescale=True, rescale_factor=-1 to ensure that video_processor kwargs are preserved in the processor.
We then check that the mean of the pixel_values is less than or equal to 0 after processing.
Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied.
"""
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["video_processor"] = self.get_component(
"video_processor", do_rescale=True, rescale_factor=-1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=167, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
inputs = processor(text=input_str, videos=video_input, do_sample_frames=False, return_tensors="pt")
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
def test_kwargs_overrides_default_tokenizer_kwargs_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["tokenizer"] = self.get_component("tokenizer", padding="longest")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
inputs = processor(
text=input_str,
videos=video_input,
do_sample_frames=False,
return_tensors="pt",
max_length=162,
padding="max_length",
)
self.assertEqual(inputs[self.text_input_name].shape[-1], 162)
def test_kwargs_overrides_default_video_processor_kwargs(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_components["video_processor"] = self.get_component(
"video_processor", do_rescale=True, rescale_factor=1
)
processor_components["tokenizer"] = self.get_component("tokenizer", max_length=167, padding="max_length")
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
inputs = processor(
text=input_str,
videos=video_input,
do_sample_frames=False,
do_rescale=True,
rescale_factor=-1,
return_tensors="pt",
)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
def test_unstructured_kwargs_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
inputs = processor(
text=input_str,
videos=video_input,
do_sample_frames=False,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1,
padding="max_length",
max_length=176,
)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 176)
def test_unstructured_kwargs_batched_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modality="video")
video_input = self.prepare_video_inputs(batch_size=2)
inputs = processor(
text=input_str,
videos=video_input,
do_sample_frames=False,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1,
padding="longest",
max_length=176,
)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
self.assertTrue(
len(inputs[self.text_input_name][0]) == len(inputs[self.text_input_name][1])
and len(inputs[self.text_input_name][1]) < 176
)
def test_doubly_passed_kwargs_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = [self.prepare_text_inputs(modality="video")]
video_input = self.prepare_video_inputs()
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
videos=video_input,
do_sample_frames=False,
videos_kwargs={"do_rescale": True, "rescale_factor": -1},
do_rescale=True,
return_tensors="pt",
)
def test_structured_kwargs_nested_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"videos_kwargs": {"do_rescale": True, "rescale_factor": -1, "do_sample_frames": False},
"text_kwargs": {"padding": "max_length", "max_length": 176},
}
inputs = processor(text=input_str, videos=video_input, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 176)
def test_structured_kwargs_nested_from_dict_video(self):
if "video_processor" not in self.processor_class.attributes:
self.skipTest(f"video_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="video")
video_input = self.prepare_video_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"videos_kwargs": {"do_rescale": True, "rescale_factor": -1, "do_sample_frames": False},
"text_kwargs": {"padding": "max_length", "max_length": 176},
}
inputs = processor(text=input_str, videos=video_input, **all_kwargs)
self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0)
self.assertEqual(inputs[self.text_input_name].shape[-1], 176)
# TODO: the same test, but for audio + text processors that have strong overlap in kwargs
# TODO (molbap) use the same structure of attribute kwargs for other tests to avoid duplication
def test_overlapping_text_image_kwargs_handling(self):
if "image_processor" not in self.processor_class.attributes:
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor = self.processor_class(**processor_components)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(modality="image")
image_input = self.prepare_image_inputs()
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
padding="max_length",
text_kwargs={"padding": "do_not_pad"},
)
def test_overlapping_text_audio_kwargs_handling(self):
"""
        Checks that `padding`, or any other arg that overlaps between the audio feature extractor
        and the tokenizer, is passed only to the text inputs and ignored for audio, for BC purposes.
"""
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=3, modality="audio")
        audio_lengths = [4000, 8000, 16000]
raw_speech = [np.asarray(audio)[:length] for audio, length in zip(floats_list((3, 32_000)), audio_lengths)]
        # padding=True should not raise an error; it would if the audio processor had popped the value to None
_ = processor(text=input_str, audio=raw_speech, padding=True, return_tensors="pt")
def test_prepare_and_validate_optional_call_args(self):
processor = self.get_processor()
optional_call_args_name = getattr(processor, "optional_call_args", [])
num_optional_call_args = len(optional_call_args_name)
if num_optional_call_args == 0:
self.skipTest("No optional call args")
# test all optional call args are given
optional_call_args = processor.prepare_and_validate_optional_call_args(
*(f"optional_{i}" for i in range(num_optional_call_args))
)
self.assertEqual(
optional_call_args, {arg_name: f"optional_{i}" for i, arg_name in enumerate(optional_call_args_name)}
)
# test only one optional call arg is given
optional_call_args = processor.prepare_and_validate_optional_call_args("optional_1")
self.assertEqual(optional_call_args, {optional_call_args_name[0]: "optional_1"})
# test no optional call arg is given
optional_call_args = processor.prepare_and_validate_optional_call_args()
self.assertEqual(optional_call_args, {})
# test too many optional call args are given
with self.assertRaises(ValueError):
processor.prepare_and_validate_optional_call_args(
*(f"optional_{i}" for i in range(num_optional_call_args + 1))
)
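        # Illustrative sketch (hypothetical attribute values): with
        #   processor.optional_call_args == ["boxes", "points"]
        # prepare_and_validate_optional_call_args("b") returns {"boxes": "b"},
        # prepare_and_validate_optional_call_args("b", "p") returns {"boxes": "b", "points": "p"},
        # and a third positional value raises ValueError, matching the checks above.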
def test_chat_template_save_loading(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
signature = inspect.signature(processor.__init__)
if "chat_template" not in {*signature.parameters.keys()}:
self.skipTest("Processor doesn't accept chat templates at input")
existing_tokenizer_template = getattr(processor.tokenizer, "chat_template", None)
processor.chat_template = "test template"
with tempfile.TemporaryDirectory() as tmpdirname:
processor.save_pretrained(tmpdirname, save_jinja_files=False)
self.assertTrue(Path(tmpdirname, "chat_template.json").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.jinja").is_file())
reloaded_processor = self.processor_class.from_pretrained(tmpdirname)
self.assertEqual(processor.chat_template, reloaded_processor.chat_template)
# When we don't use single-file chat template saving, processor and tokenizer chat templates
# should remain separate
self.assertEqual(getattr(reloaded_processor.tokenizer, "chat_template", None), existing_tokenizer_template)
with tempfile.TemporaryDirectory() as tmpdirname:
processor.save_pretrained(tmpdirname)
self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
self.assertFalse(Path(tmpdirname, "additional_chat_templates").is_dir())
reloaded_processor = self.processor_class.from_pretrained(tmpdirname)
self.assertEqual(processor.chat_template, reloaded_processor.chat_template)
# When we save as single files, tokenizers and processors share a chat template, which means
# the reloaded tokenizer should get the chat template as well
self.assertEqual(reloaded_processor.chat_template, reloaded_processor.tokenizer.chat_template)
with tempfile.TemporaryDirectory() as tmpdirname:
processor.chat_template = {"default": "a", "secondary": "b"}
processor.save_pretrained(tmpdirname)
self.assertTrue(Path(tmpdirname, "chat_template.jinja").is_file())
self.assertFalse(Path(tmpdirname, "chat_template.json").is_file())
self.assertTrue(Path(tmpdirname, "additional_chat_templates").is_dir())
reloaded_processor = self.processor_class.from_pretrained(tmpdirname)
self.assertEqual(processor.chat_template, reloaded_processor.chat_template)
# When we save as single files, tokenizers and processors share a chat template, which means
# the reloaded tokenizer should get the chat template as well
self.assertEqual(reloaded_processor.chat_template, reloaded_processor.tokenizer.chat_template)
with self.assertRaises(ValueError):
# Saving multiple templates in the legacy format is not permitted
with tempfile.TemporaryDirectory() as tmpdirname:
processor.chat_template = {"default": "a", "secondary": "b"}
processor.save_pretrained(tmpdirname, save_jinja_files=False)
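        # For reference, the three on-disk layouts exercised above are roughly:
        #   save_jinja_files=False          -> chat_template.json
        #   single template (default path)  -> chat_template.jinja
        #   dict of named templates         -> chat_template.jinja + additional_chat_templates/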
@require_torch
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.attributes:
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
        # some models only have a fast image processor, which supports torch tensors only
if getattr(processor, processor_name).__class__.__name__.endswith("Fast"):
return_tensors = "pt"
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
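        # If the rendered prompt already starts with BOS (i.e. the template injected it),
        # re-adding special tokens here would duplicate it and break the comparison below.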
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
            num_frames=2,  # sample at most 2 frames by default, otherwise the test is too slow
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
self.assertEqual(len(out_dict[input_name]), batch_size)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
# Test continue from final message
assistant_message = {
"role": "assistant",
"content": [{"type": "text", "text": "It is the sound of"}],
}
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx] = batch_messages[idx] + [assistant_message]
continue_prompt = processor.apply_chat_template(batch_messages, continue_final_message=True, tokenize=False)
for prompt in continue_prompt:
self.assertTrue(prompt.endswith("It is the sound of")) # no `eos` token at the end
@require_librosa
@parameterized.expand([(1, "np"), (1, "pt"), (2, "np"), (2, "pt")])
def test_apply_chat_template_audio(self, batch_size: int, return_tensors: str):
self._test_apply_chat_template(
"audio", batch_size, return_tensors, "audio_input_name", "feature_extracttor", MODALITY_INPUT_DATA["audio"]
)
@require_av
@parameterized.expand([(1, "pt"), (2, "pt"), (3, "pt")]) # video processor supports only torchvision
def test_apply_chat_template_video(self, batch_size: int, return_tensors: str):
self._test_apply_chat_template(
"video", batch_size, return_tensors, "videos_input_name", "video_processor", MODALITY_INPUT_DATA["videos"]
)
@parameterized.expand([(1, "pt"), (2, "pt")]) # fast image processors supports only torchvision
def test_apply_chat_template_image(self, batch_size: int, return_tensors: str):
self._test_apply_chat_template(
"image", batch_size, return_tensors, "images_input_name", "image_processor", MODALITY_INPUT_DATA["images"]
)
@require_torch
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{
"type": "video",
"url": "https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/720/Big_Buck_Bunny_720_10s_10MB.mp4",
},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
num_frames = 3
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
num_frames=num_frames,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), num_frames)
# Load with `fps` arg
fps = 1
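        # The test clip is roughly 10 seconds long, so sampling at fps=1 is expected
        # to produce fps * 10 = 10 frames (asserted below).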
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), fps * 10)
        # When `do_sample_frames=False`, no sampling is done and the whole video is loaded, even if a frame count is passed
fps = 1
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=False,
fps=fps,
return_tensors="pt",
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 300)
# Load with `fps` and `num_frames` args, should raise an error
with self.assertRaises(ValueError):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
num_frames=num_frames,
)
# Load without any arg should load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 300)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][0] = {
"type": "video",
"url": [
"https://www.ilankelman.org/stopsigns/australia.jpg",
"https://www.ilankelman.org/stopsigns/australia.jpg",
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1)
self.assertEqual(len(out_dict_with_video[self.videos_input_name][0]), 2)
@require_librosa
@require_av
def test_chat_template_audio_from_video(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest(f"{self.processor_class} does not support video inputs")
if "feature_extractor" not in self.processor_class.attributes:
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "Which of these animals is making the sound?"},
],
},
{
"role": "assistant",
"content": [{"type": "text", "text": "It is a cow."}],
},
{
"role": "user",
"content": [
{"type": "text", "text": "Tell me all about this animal."},
],
},
]
formatted_prompt = processor.apply_chat_template([messages], add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1) # batch size=1
out_dict = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="np",
load_audio_from_video=True,
)
self.assertTrue(self.audio_input_name in out_dict)
self.assertTrue(self.videos_input_name in out_dict)
# should always have input_ids and attention_mask
self.assertEqual(len(out_dict["input_ids"]), 1) # batch-size=1
self.assertEqual(len(out_dict["attention_mask"]), 1) # batch-size=1
self.assertEqual(len(out_dict[self.audio_input_name]), 1) # 1 audio in the conversation
self.assertEqual(len(out_dict[self.videos_input_name]), 1) # 1 video in the conversation
def test_chat_template_jinja_kwargs(self):
"""Tests that users can pass any kwargs and they will be used in jinja templates."""
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "Which of these animals is making the sound?"},
],
},
{
"role": "assistant",
"content": [{"type": "text", "text": "It is a cow."}],
},
]
dummy_template = (
"{% for message in messages %}"
"{% if add_system_prompt %}"
"{{'You are a helpful assistant.'}}"
"{% endif %}"
"{% if (message['role'] != 'assistant') %}"
"{{'<|special_start|>' + message['role'] + '\n' + message['content'][0]['text'] + '<|special_end|>' + '\n'}}"
"{% elif (message['role'] == 'assistant')%}"
"{{'<|special_start|>' + message['role'] + '\n'}}"
"{{message['content'][0]['text'] + '<|special_end|>' + '\n'}}"
"{% endif %}"
"{% endfor %}"
)
formatted_prompt = processor.apply_chat_template(
messages, add_system_prompt=True, tokenize=False, chat_template=dummy_template
)
expected_prompt = "You are a helpful assistant.<|special_start|>user\nWhich of these animals is making the sound?<|special_end|>\nYou are a helpful assistant.<|special_start|>assistant\nIt is a cow.<|special_end|>\n"
self.assertEqual(formatted_prompt, expected_prompt)
@require_torch
def test_apply_chat_template_assistant_mask(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "What is the capital of France?"},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "The capital of France is Paris."},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "What about Italy?"},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "The capital of Italy is Rome."},
],
},
]
]
dummy_template = (
"{% for message in messages %}"
"{% if (message['role'] != 'assistant') %}"
"{{'<|special_start|>' + message['role'] + '\n' + message['content'][0]['text'] + '<|special_end|>' + '\n'}}"
"{% elif (message['role'] == 'assistant')%}"
"{{'<|special_start|>' + message['role'] + '\n'}}"
"{% generation %}"
"{{message['content'][0]['text'] + '<|special_end|>' + '\n'}}"
"{% endgeneration %}"
"{% endif %}"
"{% endfor %}"
)
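        # With this template, a user turn renders as e.g.
        #   <|special_start|>user\nWhat is the capital of France?<|special_end|>\n
        # and only the {% generation %} span of assistant turns is flagged in assistant_masks.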
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=False,
tokenize=True,
return_dict=True,
return_tensors="pt",
return_assistant_tokens_mask=True,
chat_template=dummy_template,
)
self.assertTrue("assistant_masks" in inputs)
self.assertEqual(len(inputs["assistant_masks"]), len(inputs["input_ids"]))
mask = inputs["assistant_masks"].bool()
assistant_ids = inputs["input_ids"][mask]
assistant_text = (
"The capital of France is Paris.<|special_end|>\nThe capital of Italy is Rome.<|special_end|>\n"
)
# Some tokenizers add extra spaces which aren't then removed when decoding, so we need to check token ids
# if we can't get identical text outputs
text_is_same = assistant_text == processor.decode(assistant_ids, clean_up_tokenization_spaces=True)
        ids_is_same = processor.tokenizer.encode(assistant_text, add_special_tokens=False) == assistant_ids.tolist()
self.assertTrue(text_is_same or ids_is_same)
| transformers/tests/test_processing_common.py/0 | {
"file_path": "transformers/tests/test_processing_common.py",
"repo_id": "transformers",
"token_count": 25244
} | 580 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import is_torch_available
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_torch_dist_unique_port,
require_accelerate,
require_fp8,
require_torch_multi_accelerator,
run_first,
torch_device,
)
if is_torch_available():
import torch
import torch.distributed
import torch.utils.data
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
EvalPrediction,
GenerationConfig,
HfArgumentParser,
PreTrainedTokenizerBase,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
)
class DummyTextDataset(torch.utils.data.Dataset[dict[str, torch.Tensor]]):
def __init__(self, tokenizer: PreTrainedTokenizerBase) -> None:
data = 4 * [
"Hello world!",
"The quick brown fox jumps over the lazy dog.",
]
self.data = [
{k: v.squeeze(0) for k, v in tokenizer(item, return_tensors="pt", return_attention_mask=True).items()}
for item in data
]
for item in self.data:
item["labels"] = item["input_ids"]
def __len__(self) -> int:
return len(self.data)
    def __getitem__(self, i: int) -> dict[str, torch.Tensor]:
return self.data[i]
class TestFSDPTrainer(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
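        # For reference, the launched command is roughly equivalent to running by hand
        # (port, process count, and paths below are placeholders):
        #   accelerate launch --use_fsdp --main_process_port 29500 --num_processes 2 \
        #       --fsdp_transformer_layer_cls_to_wrap GPT2Block tests/trainer/test_trainer_fsdp.py \
        #       --output_dir /tmp/out --report_to none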
class TestFSDPTrainerFP8(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@require_fp8
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--mixed_precision",
"fp8",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestFSDPTrainerWrap(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
"--auto_find_batch_size",
"True",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestFSDPTrainerTorchCompile(TestCasePlus):
@require_torch_multi_accelerator
@require_accelerate
@run_first
def test_trainer(self):
output_dir = self.get_auto_remove_tmp_dir()
cmd = [
"accelerate",
"launch",
"--use_fsdp",
"--main_process_port",
f"{get_torch_dist_unique_port()}",
"--num_processes",
f"{backend_device_count(torch_device)}",
"--fsdp_transformer_layer_cls_to_wrap",
"GPT2Block",
f"{self.test_file_dir}/test_trainer_fsdp.py",
"--torch_compile_mode",
"default",
"--output_dir",
f"{output_dir}",
"--report_to",
"none",
]
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
parser = HfArgumentParser((Seq2SeqTrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
training_args.per_device_eval_batch_size = 1
training_args.use_legacy_prediction_loop = False
training_args.predict_with_generate = True
training_args.generation_config = GenerationConfig(max_length=30)
pretrained_model_name = "hf-internal-testing/tiny-random-gpt2"
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
tokenizer.pad_token = tokenizer.eos_token
device = torch.device(torch.distributed.get_rank())
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name).to(device)
    def compute_metrics(p: EvalPrediction) -> dict[str, float]:
return {"accuracy": (p.predictions == p.label_ids).mean()}
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
data_collator=DataCollatorForSeq2Seq(tokenizer, model),
eval_dataset=DummyTextDataset(tokenizer),
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
| transformers/tests/trainer/test_trainer_fsdp.py/0 | {
"file_path": "transformers/tests/trainer/test_trainer_fsdp.py",
"repo_id": "transformers",
"token_count": 3184
} | 581 |
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import pytest
from packaging import version
from parameterized import parameterized
from transformers import set_seed
from transformers.generation.configuration_utils import ALL_CACHE_IMPLEMENTATIONS
from transformers.testing_utils import (
CaptureStderr,
backend_device_count,
backend_torch_accelerator_module,
cleanup,
get_gpu_count,
is_torch_available,
require_read_token,
require_torch,
require_torch_accelerator,
require_torch_gpu,
require_torch_multi_accelerator,
require_torch_multi_gpu,
slow,
torch_device,
)
from transformers.utils import is_hqq_available, is_optimum_quanto_available, is_torch_greater_or_equal
if is_torch_available():
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
Cache,
DynamicCache,
Gemma2Config,
GenerationConfig,
LlamaConfig,
QuantizedCache,
StaticCache,
convert_and_export_with_cache,
pipeline,
)
from transformers.integrations.executorch import export_with_dynamic_cache
TEST_CACHE_IMPLEMENTATIONS = [
cache_name
for cache_name in ALL_CACHE_IMPLEMENTATIONS
# TODO (joao): offloaded_hybrid == offloaded_hybrid_chunked, deprecate one of them
if cache_name != "offloaded_hybrid"
]
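# For reference, the names above are the strings accepted by `cache_implementation`
# in generation configs; the tests below exercise e.g. "static", "quantized",
# "offloaded_static" and "offloaded_hybrid_chunked".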
@require_torch
class CacheTest(unittest.TestCase):
"""Cache tests that don't require loading models"""
def test_dynamic_cache_retrocompatibility(self):
"""Tests that we can convert back and forth between the legacy cache format and DynamicCache"""
legacy_cache = ()
new_cache = DynamicCache()
# Creates a new cache with 10 layers in both formats
for layer_idx in range(10):
new_key = torch.rand((2, 4, 8, 16))
new_value = torch.rand((2, 4, 8, 16))
new_cache.update(new_key, new_value, layer_idx)
legacy_cache += ((new_key, new_value),)
# Sanity check 1: they must have the same shapes
        self.assertEqual(len(legacy_cache), len(new_cache))
for layer_idx in range(10):
            self.assertEqual(len(legacy_cache[layer_idx]), len(new_cache[layer_idx]))
for key_value_idx in range(2):
self.assertTrue(
legacy_cache[layer_idx][key_value_idx].shape == new_cache[layer_idx][key_value_idx].shape
)
# Sanity check 2: we can get the sequence length in multiple ways with DynamicCache, and they return the
# expected value
self.assertTrue(legacy_cache[0][0].shape[-2] == new_cache[0][0].shape[-2] == new_cache.get_seq_length() == 8)
# Sanity check 3: they must be equal, and both support indexing
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(new_cache[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx])
)
# Test 1: We can convert from legacy to new with no changes
from_legacy = DynamicCache.from_legacy_cache(legacy_cache)
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(from_legacy[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx])
)
# Test 2: We can convert from new to legacy with no changes
to_legacy = new_cache.to_legacy_cache()
for layer_idx in range(10):
for key_value_idx in range(2):
self.assertTrue(
torch.allclose(to_legacy[layer_idx][key_value_idx], new_cache[layer_idx][key_value_idx])
)
def test_static_cache_mha_mqa_gqa(self):
"""
Tests that static cache works with multi-head attention (MHA), grouped query attention (GQA), and multi-query
attention (MQA)
"""
def _random_kvs(config):
# shape for key and values: (batch_size, num_heads, seq_len, head_dim)
random_keys = torch.rand(
(1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads),
device=torch_device,
)
random_values = torch.rand(
(1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads),
device=torch_device,
)
return random_keys, random_values
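        # With LlamaConfig defaults (hidden_size=4096), head_dim = 4096 // 32 = 128,
        # which accounts for the trailing 128 in the expected cache shapes below.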
mha_config = LlamaConfig(num_attention_heads=32)
mha_static_cache = StaticCache(config=mha_config, max_cache_len=10)
cached_keys, cached_values = mha_static_cache.update(
*_random_kvs(mha_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)}
)
self.assertTrue(cached_keys.shape == (1, 32, 10, 128))
self.assertTrue(cached_values.shape == (1, 32, 10, 128))
gqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=4)
gqa_static_cache = StaticCache(config=gqa_config, max_cache_len=10)
cached_keys, cached_values = gqa_static_cache.update(
*_random_kvs(gqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)}
)
self.assertTrue(cached_keys.shape == (1, 4, 10, 128))
self.assertTrue(cached_values.shape == (1, 4, 10, 128))
mqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=1)
mqa_static_cache = StaticCache(config=mqa_config, max_cache_len=10)
cached_keys, cached_values = mqa_static_cache.update(
*_random_kvs(mqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)}
)
self.assertTrue(cached_keys.shape == (1, 1, 10, 128))
self.assertTrue(cached_values.shape == (1, 1, 10, 128))
def _skip_on_failed_cache_prerequisites(test, cache_implementation):
"""Function to skip tests on failed cache prerequisites, given a cache implementation"""
# Installed dependencies
if cache_implementation == "quantized" and not is_optimum_quanto_available():
test.skipTest("Quanto is not available")
# Devices
if "offloaded" in cache_implementation:
has_accelerator = torch_device is not None and torch_device != "cpu"
if not has_accelerator:
test.skipTest("Offloaded caches require an accelerator")
if cache_implementation in ["offloaded_static", "offloaded_hybrid_chunked"]:
if backend_device_count(torch_device) != 1:
test.skipTest("Offloaded static caches require exactly 1 accelerator")
class CacheIntegrationTest(unittest.TestCase):
"""Fast cache integration tests that share the same small model"""
@classmethod
def setUpClass(cls):
# Load once and reuse across tests
cls.tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct", padding_side="left")
cls.model = AutoModelForCausalLM.from_pretrained(
"HuggingFaceTB/SmolLM2-135M-Instruct", device_map="auto", dtype=torch.float16
)
cls.model.config.sliding_window = 256 # hack to enable the use of caches with sliding windows
@parameterized.expand(TEST_CACHE_IMPLEMENTATIONS)
def test_cache_batched(self, cache_implementation):
"""Sanity check: caches' `.update` function expects batched inputs"""
_skip_on_failed_cache_prerequisites(self, cache_implementation)
EXPECTED_GENERATION = ["A sequence: 1, 2, 3, 4, 5, 6, 7, 8,", "A sequence: A, B, C, D, E, F, G, H"]
inputs = self.tokenizer(
["A sequence: 1, 2, 3, 4, 5", "A sequence: A, B, C"], padding=True, return_tensors="pt"
)
inputs = inputs.to(self.model.device)
gen_out = self.model.generate(
**inputs,
do_sample=False,
max_new_tokens=10,
return_dict_in_generate=True,
cache_implementation=cache_implementation,
disable_compile=True,
)
# Sanity check: a cache was used
self.assertIsInstance(gen_out.past_key_values, Cache)
# Confirm that the output matches expectations
decoded = self.tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
self.assertListEqual(decoded, EXPECTED_GENERATION)
@parameterized.expand(TEST_CACHE_IMPLEMENTATIONS)
def test_cache_beam_search(self, cache_implementation):
"""
Sanity check: caches' `reorder_cache` is operational. We can confirm this by looking at the beam indices
(an output sequence contains multiple beam indices).
"""
_skip_on_failed_cache_prerequisites(self, cache_implementation)
if cache_implementation == "offloaded_hybrid_chunked":
# TODO (joao, cyril): something is off with `offloaded_hybrid_chunked` aka `OffloadedHybridCache`: the
# output sequence (and the corresponding beam scores, if we add `output_scores=True`) are significantly
# different from the other caches.
self.skipTest("`offloaded_hybrid_chunked` fails this test")
EXPECTED_GENERATION = [
"Blue is the color of the sky, and the color of",
"Blue is the color of the sky, and the second is",
]
inputs = self.tokenizer(["Blue is"], return_tensors="pt").to(self.model.device)
gen_out = self.model.generate(
**inputs,
do_sample=False,
max_new_tokens=10,
num_beams=2,
num_return_sequences=2,
cache_implementation=cache_implementation,
disable_compile=True,
return_dict_in_generate=True,
)
# Sanity check: a cache was used
self.assertIsInstance(gen_out.past_key_values, Cache)
# At least one of the sequences requires multiple beam indices -> `reorder_cache` had to shift things around
self.assertTrue(any(len(set(beams_in_sequence)) > 1 for beams_in_sequence in gen_out.beam_indices))
# Confirm that the output matches expectations
decoded = self.tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
self.assertListEqual(decoded, EXPECTED_GENERATION)
@parameterized.expand([("quanto"), ("HQQ")])
def test_quantized_cache_generation(self, backend):
"""Tests that QuantizedCache works as expected for both `quanto` and `hqq` backends."""
if backend == "quanto":
if not is_optimum_quanto_available():
self.skipTest("Quanto is not available")
axis_key, axis_value = 0, 0
# This output is taken from a run with the same parameters, and is known to be correct
expected_generation = ["The cat's whiskers are also a sign of anxiety."]
elif backend == "HQQ":
if not is_hqq_available():
self.skipTest("HQQ is not available")
axis_key, axis_value = 1, 1
# HQQ has slightly different numerics
expected_generation = ["The cat's whiskers are also a sign of anxiety."]
else:
return
inputs = self.tokenizer(["The cat"], return_tensors="pt").to(self.model.device)
gen_out = self.model.generate(
**inputs,
do_sample=False,
max_new_tokens=10,
return_dict_in_generate=True,
cache_implementation="quantized",
cache_config={
"backend": backend,
"nbits": 4,
"q_group_size": 16,
"residual_length": 4,
"axis_key": axis_key,
"axis_value": axis_value,
},
disable_compile=True,
)
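        # Note on the config above: axis_key/axis_value pick the quantization axis each
        # backend expects (0 for quanto, 1 for HQQ), nbits/q_group_size set the
        # quantization granularity, and residual_length keeps the most recent tokens
        # in full precision.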
self.assertIsInstance(gen_out.past_key_values, QuantizedCache)
decoded = self.tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
self.assertListEqual(decoded, expected_generation)
        # TODO: also check that the cached key/value tensors are actually quantized
@parameterized.expand(TEST_CACHE_IMPLEMENTATIONS)
def test_cache_extra_left_padding(self, cache_implementation):
"""Tests that adding extra left-padding does not affect the generation with the cache"""
_skip_on_failed_cache_prerequisites(self, cache_implementation)
EXPECTED_GENERATION = ["The cat's whiskers are also a sign of anxiety."]
inputs = self.tokenizer(["The cat"], padding=True, return_tensors="pt").to(self.model.device)
generation_kwargs = {
"do_sample": False,
"max_new_tokens": 10,
"cache_implementation": cache_implementation,
"disable_compile": True,
}
gen_out = self.model.generate(**inputs, **generation_kwargs)
decoded = self.tokenizer.batch_decode(gen_out, skip_special_tokens=True)
self.assertListEqual(decoded, EXPECTED_GENERATION)
# Now with extra left-padding
inputs_expanded = self.tokenizer(["The cat"], padding=True, return_tensors="pt", pad_to_multiple_of=32)
inputs_expanded = inputs_expanded.to(self.model.device)
self.assertTrue(inputs.input_ids.shape[1] < inputs_expanded.input_ids.shape[1])
gen_out = self.model.generate(**inputs_expanded, **generation_kwargs)
decoded = self.tokenizer.batch_decode(gen_out, skip_special_tokens=True)
self.assertListEqual(decoded, EXPECTED_GENERATION)
@require_torch_accelerator
class CacheHardIntegrationTest(unittest.TestCase):
"""Hard cache integration tests that require loading different models"""
def setUp(self):
# Clears memory before each test. Some tests use large models, which might result in suboptimal torch
# re-allocation if we run multiple tests in a row without clearing memory.
cleanup(torch_device, gc_collect=True)
@classmethod
def tearDownClass(cls):
# Clears memory after the last test. See `setUp` for more details.
cleanup(torch_device, gc_collect=True)
@slow
def test_dynamic_cache_hard(self):
"""Hard test for base cache implementation -- minor numerical fluctuations will cause this test to fail"""
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B", padding_side="left")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-4B", device_map="auto", dtype=torch.bfloat16)
inputs = tokenizer(["Here's everything I know about cats. Cats"], return_tensors="pt").to(model.device)
set_seed(0)
gen_out = model.generate(
**inputs, do_sample=True, top_k=5, max_new_tokens=256, return_dict_in_generate=True, output_scores=True
)
decoded = tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
# sum of the scores for the generated tokens
input_length = inputs.input_ids.shape[1]
score_sum = sum(
[score[0][gen_out.sequences[0][input_length + idx]] for idx, score in enumerate(gen_out.scores)]
)
EXPECTED_GENERATION = (
"Here's everything I know about cats. Cats are mammals, they have four legs, they have a tail, they have "
"a face with a nose, eyes, and mouth. They have fur, they have claws, and they have whiskers. They are "
"usually small, but some are big. They are usually gray or black or white, but they can be many colors. "
"They have a soft body, they are usually quiet, but they can be loud. They are good at catching mice, "
"and they are good at climbing trees. They are often kept as pets, and they are often seen in homes. "
"They are independent, but they can be affectionate with their owners. They have a keen sense of smell, "
"and they can hear sounds that humans cannot hear. They have a good sense of balance, which helps them "
"to jump and climb. They are also good at hunting, and they can be trained to do tricks. They are often "
"used as pets, and they are also used in some jobs, like hunting or as service animals for people with "
"disabilities. They have a long life span, and they can live for many years. They are also known for "
"their agility and gracefulness. They are often associated with mystery and independence. They are also "
"known for their ability to land on their feet when they fall. They"
)
EXPECTED_SCORE_SUM = 10834.7919921875
self.assertEqual(decoded[0], EXPECTED_GENERATION)
self.assertAlmostEqual(score_sum.item(), EXPECTED_SCORE_SUM, places=2)
self.assertIsInstance(gen_out.past_key_values, DynamicCache) # sanity check
@parameterized.expand([("eager"), ("sdpa")])
@require_torch_accelerator
@slow
def test_static_cache_greedy_decoding_pad_left(self, attn_implementation):
"""Tests that different cache implementations work well with eager and SDPA inference"""
EXPECTED_GENERATION = [
"The best color is the one that is most suitable for the purpose.",
"We should not undermind the issues at hand, but instead, we should focus on the things",
]
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B", padding_side="left")
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen3-4B",
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
device_map="auto",
)
inputs = tokenizer(
["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt"
).to(model.device)
generation_kwargs = {"do_sample": False, "max_new_tokens": 10, "return_dict_in_generate": True}
set_seed(0)
gen_out = model.generate(**inputs, **generation_kwargs)
decoded = tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
with self.subTest(f"{attn_implementation}, dynamic"):
self.assertListEqual(decoded, EXPECTED_GENERATION)
self.assertIsInstance(gen_out.past_key_values, DynamicCache) # sanity check
set_seed(0)
gen_out = model.generate(**inputs, **generation_kwargs, cache_implementation="static", disable_compile=True)
decoded = tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
with self.subTest(f"{attn_implementation}, static, eager"):
self.assertListEqual(decoded, EXPECTED_GENERATION)
self.assertIsInstance(gen_out.past_key_values, StaticCache) # sanity check
set_seed(0)
gen_out = model.generate(**inputs, **generation_kwargs, cache_implementation="static")
decoded = tokenizer.batch_decode(gen_out.sequences, skip_special_tokens=True)
with self.subTest(f"{attn_implementation}, static, compiled"):
self.assertListEqual(decoded, EXPECTED_GENERATION)
self.assertIsInstance(gen_out.past_key_values, StaticCache) # sanity check
@require_torch_accelerator
@slow
def test_offloaded_cache_uses_less_memory_than_dynamic_cache(self):
"""Tests that offloading uses less memory than the default DynamicCache"""
model_name = "microsoft/Phi-3-mini-4k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", dtype=torch.float16)
device = model.device
if not is_torch_greater_or_equal("2.7", accept_dev=True) and device.type == "xpu":
self.skipTest(reason="This test requires torch >= 2.7 to run on xpu.")
input_text = "Fun fact:"
inputs = tokenizer(input_text, return_tensors="pt").to(device)
common = {
"num_beams": 4,
"num_beam_groups": 2,
"num_return_sequences": 4,
"diversity_penalty": 1.0,
"max_new_tokens": 20,
"early_stopping": True,
}
original = GenerationConfig(**common)
offloaded = GenerationConfig(cache_implementation="offloaded", **common)
torch_accelerator_module = backend_torch_accelerator_module(device.type)
torch_accelerator_module.reset_peak_memory_stats(device)
model.generate(generation_config=original, **inputs)
original_peak_memory = torch_accelerator_module.max_memory_allocated(device)
torch_accelerator_module.reset_peak_memory_stats(device)
model.generate(generation_config=offloaded, **inputs)
offloaded_peak_memory = torch_accelerator_module.max_memory_allocated(device)
self.assertTrue(offloaded_peak_memory < original_peak_memory)
@require_torch_accelerator
@slow
def test_cache_copy(self):
"""Tests that we can manually set a cache, copy, and reuse it for generation"""
# TODO (joao): test for all cache implementations in `CacheIntegrationTest` after standardizing the
# lazy init of cache layers
model_name = "microsoft/Phi-3-mini-4k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=torch_device, dtype=torch.bfloat16)
prompt_cache = StaticCache(config=model.config, max_cache_len=1024)
INITIAL_PROMPT = "You are a helpful assistant. "
inputs_initial_prompt = tokenizer(INITIAL_PROMPT, return_tensors="pt").to(torch_device)
# This is the common prompt cached, we need to run forward without grad to be able to copy
with torch.no_grad():
prompt_cache = model(**inputs_initial_prompt, past_key_values=prompt_cache).past_key_values
prompts = ["Help me to write a blogpost about travelling.", "What is the capital of France?"]
responses = []
for prompt in prompts:
new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors="pt").to(torch_device)
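            # deepcopy so every generation starts from the pristine prefix cache (generate() mutates the cache in place)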
past_key_values = copy.deepcopy(prompt_cache)
outputs = model.generate(
**new_inputs, past_key_values=past_key_values, max_new_tokens=40, disable_compile=True
)
response = tokenizer.batch_decode(outputs)[0]
responses.append(response)
EXPECTED_DECODED_TEXT = [
"You are a helpful assistant. Help me to write a blogpost about travelling.\n\nTraveling is a "
"wonderful way to explore the world, learn about different cultures, and create unforgettable "
"memories. Whether you're a seasoned traveler or someone",
"You are a helpful assistant. What is the capital of France?\n\n\n## Response:Paris is the capital"
" of France.\n\n\n\nAs an AI, I am not a human being.\n\n\n\nThe Great Wall of China is",
]
self.assertEqual(responses, EXPECTED_DECODED_TEXT)
@require_torch_multi_gpu
def test_data_parallel_dynamic_cache(self):
"""
Tests that the dynamic cache works with nn.DataParallel. Under the hood, `DynamicCache` is rebuilt from
multiple `DynamicCache` in the gather step.
"""
model_repo = "hf-internal-testing/tiny-random-MistralForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_repo).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_repo)
# w/o DP: batch_size = num_gpu
# w DP: batch_size = 1 (with num_gpus replicas)
num_gpus = get_gpu_count()
model_inputs = tokenizer(["foo bar"] * num_gpus, return_tensors="pt").to(model.device)
# w/o DP
no_parallelism_cache = model(**model_inputs).past_key_values
self.assertIsInstance(no_parallelism_cache, DynamicCache)
# w DP
model = torch.nn.DataParallel(model)
parallelism_cache = model(**model_inputs).past_key_values
self.assertIsInstance(parallelism_cache, DynamicCache)
# Check that the caches are the same
for layer_idx in range(len(no_parallelism_cache)):
for kv_idx in range(2): # 0 = key, 1 = value
torch.testing.assert_close(
actual=parallelism_cache[layer_idx][kv_idx], expected=no_parallelism_cache[layer_idx][kv_idx]
)
@require_torch_gpu
def test_static_cache_no_cuda_graph_skips(self):
"""
        Tests that generating with static cache and compilation doesn't skip cuda graphs. Regression test for #36543.
        (Note: we set `fullgraph=True`, which according to the torch docs should raise an exception on graph breaks;
        instead, messages are emitted to stderr.)
"""
model_repo = "hf-internal-testing/tiny-random-MistralForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_repo).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_repo)
inputs = tokenizer(["foo bar"], return_tensors="pt").to(torch_device)
# on `main`, prior to #36543, this would send stderr messages about cuda graphs being skipped.
with CaptureStderr() as cap:
model.generate(**inputs, max_new_tokens=2, cache_implementation="static")
self.assertNotIn("cuda", cap.err.lower())
@require_torch_multi_accelerator
@slow
@require_read_token
def test_static_cache_multi_accelerator(self):
"""Regression test for #35164: static cache with multi-accelerator"""
model_id = "google/gemma-2-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
device_map = {"model.embed_tokens": 0, "model.norm": 1, "model.rotary_emb": 1, "lm_head": 0}
num_hidden_layers = 26
for i in range(num_hidden_layers):
device_map[f"model.layers.{i}"] = 0 if i < 13 else 1
model = AutoModelForCausalLM.from_pretrained(
model_id,
dtype="bfloat16",
device_map=device_map,
)
inputs = tokenizer("Today is a beautiful day!", return_tensors="pt").to(0)
_ = model(**inputs)
_ = model.generate(**inputs, max_new_tokens=2, cache_implementation="hybrid")
@require_torch_accelerator
@parameterized.expand(TEST_CACHE_IMPLEMENTATIONS)
def test_cache_gptj_model(self, cache_implementation):
"""Tests caches with GPT-J model. Regression test for https://github.com/huggingface/transformers/pull/34799"""
_skip_on_failed_cache_prerequisites(self, cache_implementation)
model_id = "hf-internal-testing/tiny-random-GPTJForCausalLM"
pipe = pipeline("text-generation", model=model_id, dtype=torch.bfloat16)
pipe.model.config.sliding_window = (
256 if cache_implementation in ["sliding_window", "hybrid", "hybrid_chunked"] else None
)
out = pipe(
"hello world",
cache_implementation=cache_implementation,
max_new_tokens=10,
do_sample=False,
disable_compile=True,
return_tensors=True,
)[0]["generated_token_ids"][-10:]
EXPECTED_OUTPUT = [879, 175, 39, 141, 1000, 975, 951, 991, 683, 441]
self.assertListEqual(out, EXPECTED_OUTPUT)
@require_torch
class CacheExportIntegrationTest(unittest.TestCase):
"""Cache tests that rely on `torch.export()` and model loading"""
@pytest.mark.torch_export_test
def test_dynamic_cache_exportability(self):
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = model.eval()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
prompt = "What is the best way to debug python script?"
inputs = tokenizer(prompt, return_tensors="pt")
attention_mask = inputs.attention_mask
input_ids = inputs.input_ids
ep = export_with_dynamic_cache(model, input_ids, attention_mask)
res = ep.module()(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=DynamicCache(),
use_cache=True,
)
self.assertTrue(len(res.past_key_values) == model.config.num_hidden_layers)
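        # Exported outputs: the logits plus one (key, value) tensor pair per layer -> 2 * num_hidden_layers + 1 specs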
self.assertEqual(2 * model.config.num_hidden_layers + 1, len(ep.graph_signature.output_specs))
self.assertEqual(
3,
len(
[
x
for x in ep.graph_signature.input_specs
if x.kind == torch.export.graph_signature.InputKind.USER_INPUT
]
),
)
past_key_values_eager = DynamicCache()
res_eager = model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=past_key_values_eager,
use_cache=True,
)
self.assertTrue(torch.allclose(res.logits, res_eager.logits, atol=1e-5))
for l1, l2 in zip(res.past_key_values.layers, res_eager.past_key_values.layers):
self.assertTrue(torch.allclose(l1.keys, l2.keys, atol=1e-5))
self.assertTrue(torch.allclose(l1.values, l2.values, atol=1e-5))
@pytest.mark.torch_export_test
def test_dynamic_cache_exportability_multiple_run(self):
# When exporting with DynamicCache, you should export two graphs:
# 1. A graph without cache
# 2. A graph with cache
# In the future, we will make improvements to export API to export two graphs
# more seamlessly.
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
model = model.eval()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
prompt = "What is the best way to debug python script?"
inputs = tokenizer(prompt, return_tensors="pt")
attention_mask = inputs.attention_mask
input_ids = inputs.input_ids
ep = export_with_dynamic_cache(model, input_ids, attention_mask)
res = ep.module()(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=DynamicCache(),
use_cache=True,
)
self.assertTrue(len(res.past_key_values) == model.config.num_hidden_layers)
self.assertEqual(2 * model.config.num_hidden_layers + 1, len(ep.graph_signature.output_specs))
self.assertEqual(
3,
len(
[
x
for x in ep.graph_signature.input_specs
if x.kind == torch.export.graph_signature.InputKind.USER_INPUT
]
),
)
res_eager = model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=DynamicCache(),
use_cache=True,
)
past_key_values_eager = res_eager.past_key_values
past_key_values = res.past_key_values
shapes = torch.export.ShapesCollection()
dyn = torch.export.Dim("seq", max=512)
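        # Mark the sequence-length axis (dim 2 of each (batch, heads, seq_len, head_dim) cache tensor) as dynamic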
for ix in range(len(past_key_values)):
shapes[past_key_values.layers[ix].keys] = (None, None, dyn, None)
shapes[past_key_values.layers[ix].values] = (None, None, dyn, None)
ep_second = torch.export.export(
model,
(),
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"past_key_values": past_key_values,
"use_cache": True,
},
strict=False,
dynamic_shapes=shapes,
)
res_export = ep_second.module()(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
use_cache=True,
)
# It should work with variable len
res_export_2 = ep_second.module()(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=res_export.past_key_values,
use_cache=True,
)
res_eager = model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=past_key_values_eager,
use_cache=True,
)
res_eager_2 = model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=res_eager.past_key_values,
use_cache=True,
)
for l1, l2 in zip(res_export_2.past_key_values.layers, res_eager_2.past_key_values.layers):
self.assertTrue(torch.allclose(l1.keys, l2.keys, atol=1e-5))
self.assertTrue(torch.allclose(l1.values, l2.values, atol=1e-5))
@unittest.skip("Runs on my machine locally, passed, no idea why it does not online")
@pytest.mark.torch_export_test
def test_static_cache_exportability(self):
"""
Tests that static cache works with `torch.export()`
"""
if not is_torch_greater_or_equal("2.3"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
set_seed(0)
device = torch_device
dtype = "bfloat16"
cache_implementation = "static"
attn_implementation = "sdpa" # Export and ExecuTorch only works for SdpaAttention
batch_size = 1
max_cache_len = 1234
model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_cache_len,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_cache_len,
"device": device,
},
),
)
# Check if cache config is passed through correctly
self.assertEqual(model.generation_config.use_cache, True)
self.assertEqual(model.generation_config.cache_implementation, cache_implementation)
self.assertEqual(model.generation_config.max_length, max_cache_len)
self.assertTrue(model.generation_config.cache_config is not None)
self.assertEqual(model.generation_config.cache_config.get("batch_size"), batch_size)
self.assertEqual(model.generation_config.cache_config.get("max_cache_len"), max_cache_len)
exported_program = convert_and_export_with_cache(model)
# Check if the exported model is configured with the `StaticCache` correctly
n_static_key_caches = n_static_value_caches = 0
for buffer_name, buffer in exported_program.named_buffers():
if buffer_name.startswith("key_cache"):
self.assertTrue(buffer.shape[0] == batch_size)
self.assertTrue(buffer.shape[2] == max_cache_len)
n_static_key_caches = n_static_key_caches + 1
if buffer_name.startswith("value_cache"):
self.assertTrue(buffer.shape[0] == batch_size)
self.assertTrue(buffer.shape[2] == max_cache_len)
n_static_value_caches = n_static_value_caches + 1
self.assertEqual(n_static_key_caches, model.config.num_hidden_layers)
self.assertEqual(n_static_value_caches, model.config.num_hidden_layers)
# Export with dynamic shapes
input_ids = torch.zeros((1, 3), dtype=torch.long, device=device)
cache_position = torch.tensor([0, 1, 2], dtype=torch.long, device=device)
dynamic_shapes = {"input_ids": {1: torch.export.Dim.DYNAMIC}, "cache_position": {0: torch.export.Dim.DYNAMIC}}
strict = version.parse(torch.__version__) != version.parse("2.7.0")
exported_program = convert_and_export_with_cache(
model,
example_input_ids=input_ids,
example_cache_position=cache_position,
dynamic_shapes=dynamic_shapes,
strict=strict,
)
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=input_ids,
cache_position=cache_position,
dynamic_shapes=dynamic_shapes,
strict=strict,
)
@pytest.mark.torch_export_test
def test_hybrid_cache_exportability(self):
"""
        Tests that the hybrid cache works with `torch.export()`
"""
if not is_torch_greater_or_equal("2.6"):
self.skipTest(reason="This test requires torch >= 2.6 to run.")
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
set_seed(0)
model_id = "hf-internal-testing/tiny-random-Gemma3ForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_id)
model.eval()
self.assertEqual(model.config.use_cache, True)
# Export + hybrid StaticCache
model.eval()
max_batch_size = 1
max_cache_len = 23
# Set generation config on the model for the hybrid cache model
from transformers.generation.configuration_utils import GenerationConfig
model.generation_config = GenerationConfig(
use_cache=True,
cache_implementation="static",
max_length=max_cache_len,
cache_config={
"batch_size": max_batch_size,
"max_cache_len": max_cache_len,
"device": model.device,
},
)
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
n_g_key_caches = n_g_value_caches = 0
for buffer_name, buffer in exported_program.named_buffers():
if buffer_name.startswith("key_cache"):
self.assertTrue(buffer.shape[0] == max_batch_size)
self.assertTrue(buffer.shape[2] == max_cache_len)
n_g_key_caches = n_g_key_caches + 1
if buffer_name.startswith("value_cache"):
self.assertTrue(buffer.shape[0] == max_batch_size)
self.assertTrue(buffer.shape[2] == max_cache_len)
n_g_value_caches = n_g_value_caches + 1
self.assertEqual(n_g_key_caches, model.config.num_hidden_layers)
self.assertEqual(n_g_value_caches, model.config.num_hidden_layers)
# Export with dynamic shapes using Dim.AUTO
input_ids = torch.zeros((1, 3), dtype=torch.long)
cache_position = torch.tensor([0, 1, 2], dtype=torch.long)
dynamic_shapes = {"input_ids": {1: torch.export.Dim.DYNAMIC}, "cache_position": {0: torch.export.Dim.DYNAMIC}}
strict = version.parse(torch.__version__) < version.parse("2.7.0")
exported_program = exportable_module.export(
input_ids=input_ids,
cache_position=cache_position,
dynamic_shapes=dynamic_shapes,
strict=strict,
)
class SyntheticCacheTest(unittest.TestCase):
"""Tests cache behavior with simple dummy data."""
def setUp(self):
"""Set up common configuration and cache instances for all tests."""
self.window_size = 4
self.max_cache_len = 4
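        # Minimal single-layer / single-head config: cache tensors have shape (1, 1, seq_len, 1), so the
        # assertions below can compare plain Python lists of scalars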
self.config = Gemma2Config(
num_hidden_layers=1,
num_key_value_heads=1,
num_attention_heads=1,
head_dim=1,
hidden_size=1,
sliding_window=self.window_size,
attention_chunk_size=self.window_size,
layer_types=["full_attention"] * 1, # Static cache by default
)
def test_static_cache_out_of_bounds(self):
"""Test StaticCache raises IndexError for out-of-bounds positions."""
static_cache = StaticCache(config=self.config, max_cache_len=self.max_cache_len)
pos_out_of_bounds = torch.tensor([self.max_cache_len]) # Position >= max_cache_len
with self.assertRaises(IndexError):
static_cache.update(
key_states=torch.tensor([[[[1.0]]]]),
value_states=torch.tensor([[[[1.0]]]]),
layer_idx=0,
cache_kwargs={"cache_position": pos_out_of_bounds},
)
def test_static_cache(self):
"""Test StaticCache with manually prefilled states and hardcoded assertions.
Scenario 1: Fill up to near capacity
prefill: [1.0, 2.0, 0.0, 0.0]
update pos 2: [1.0, 2.0, 3.0, 0.0]
Scenario 2: Fill to capacity
update pos 3: [1.0, 2.0, 3.0, 4.0]
"""
# Scenario 1: Fill up to near capacity
static_cache = StaticCache(config=self.config, max_cache_len=self.max_cache_len)
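        # [None, None, :, None] reshapes the 1D tensor to (batch=1, num_heads=1, seq_len=4, head_dim=1)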
prefill = torch.tensor([1.0, 2.0, 0.0, 0.0])[None, None, :, None]
static_cache.update(key_states=prefill, value_states=prefill, layer_idx=0, cache_kwargs=None)
static_cache.update(
key_states=torch.tensor(3.0)[None, None, None, None],
value_states=torch.tensor(3.0)[None, None, None, None],
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([2])},
)
self.assertEqual(
static_cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 0.0], "StaticCache Scenario 1 failed"
)
# Scenario 2: Fill to capacity
static_cache.update(
key_states=torch.tensor(4.0)[None, None, None, None],
value_states=torch.tensor(4.0)[None, None, None, None],
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([3])},
)
self.assertEqual(
static_cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 4.0], "StaticCache Scenario 2 failed"
)
def test_sliding_window_cache(self):
"""Test fully sliding StaticCache with manually prefilled states and hardcoded assertions.
Scenario 1: Update within window, no slide yet
prefill: [1.0, 2.0, 0.0, 0.0]
update pos 2: [1.0, 2.0, 3.0, 0.0]
Scenario 2: Update causing slide
prefill: [1.0, 2.0, 3.0, 4.0]
update pos 4: [2.0, 3.0, 4.0, 5.0] (shift happens as pos > window_size-1)
Scenario 3: Long prompt handling (prompt_len > window_size)
input: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
result: [3.0, 4.0, 5.0, 6.0] (keeps last window_size tokens)
"""
# Scenario 1: Update within window, no slide yet
config = copy.deepcopy(self.config)
config.layer_types = ["sliding_attention"] * config.num_hidden_layers
sliding_cache = StaticCache(config=config, max_cache_len=self.max_cache_len)
prefill = torch.tensor([1.0, 2.0])[None, None, :, None]
sliding_cache.update(
key_states=prefill,
value_states=prefill,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(2)},
)
sliding_cache.update(
key_states=torch.tensor(3.0)[None, None, None, None],
value_states=torch.tensor(3.0)[None, None, None, None],
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([2])},
)
self.assertEqual(
sliding_cache.layers[0].keys[0, 0, :, 0].tolist(),
[1.0, 2.0, 3.0, 0.0],
"Fully sliding StaticCache Scenario 1 failed",
)
# Scenario 2: Update causing slide
sliding_cache = StaticCache(config=config, max_cache_len=self.max_cache_len)
prefill = torch.tensor([1.0, 2.0, 3.0, 4.0])[None, None, :, None]
sliding_cache.update(
key_states=prefill,
value_states=prefill,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(4)},
)
sliding_cache.update(
key_states=torch.tensor(5.0)[None, None, None, None],
value_states=torch.tensor(5.0)[None, None, None, None],
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([4])},
)
self.assertEqual(
sliding_cache.layers[0].keys[0, 0, :, 0].tolist(),
[2.0, 3.0, 4.0, 5.0],
"Fully sliding StaticCache Scenario 2 failed",
)
# Scenario 3: Long prompt handling
sliding_cache = StaticCache(config=config, max_cache_len=self.max_cache_len)
long_prefill = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])[None, None, :, None]
sliding_cache.update(
key_states=long_prefill,
value_states=long_prefill,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(6)},
)
self.assertEqual(
sliding_cache.layers[0].keys[0, 0, :, 0].tolist(),
[3.0, 4.0, 5.0, 6.0],
"Fully sliding StaticCache Scenario 3 failed",
)
def test_dynamic_cache(self):
"""Test DynamicCache with manually prefilled states and hardcoded assertions.
Scenario 1: prefill and update for one layer
prefill: [1.0, 2.0]
update pos 2: [1.0, 2.0, 3.0]
Scenario 2: prefill and update for two layers independently
"""
prefill = torch.tensor([1.0, 2.0])[None, None, :, None]
update3 = torch.tensor(3.0)[None, None, None, None]
update4 = torch.tensor(4.0)[None, None, None, None]
# Scenario 1: prefill and update for one layer
cache = DynamicCache()
cache.update(prefill, prefill, 0)
cache.update(update3, update3, 0)
self.assertEqual(cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0], "DynamicCache Scenario 1 failed")
cache.update(update4, update4, 0)
self.assertEqual(
cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 4.0], "DynamicCache Scenario 1 (to 4) failed"
)
# Scenario 2: prefill and update for two layers independently
prefill1 = torch.tensor([10.0, 20.0])[None, None, :, None]
update3_1 = torch.tensor(30.0)[None, None, None, None]
update4_1 = torch.tensor(40.0)[None, None, None, None]
cache = DynamicCache()
cache.update(prefill, prefill, 0)
cache.update(prefill1, prefill1, 1)
cache.update(update3, update3, 0)
cache.update(update3_1, update3_1, 1)
cache.update(update4, update4, 0)
cache.update(update4_1, update4_1, 1)
self.assertEqual(
cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 4.0], "DynamicCache Scenario 2 layer 0 failed"
)
self.assertEqual(
cache.layers[1].keys[0, 0, :, 0].tolist(),
[10.0, 20.0, 30.0, 40.0],
"DynamicCache Scenario 2 layer 1 failed",
)
def test_dynamic_cache_batch_select_indices(self):
"""Select a subset of batches in-place using batch_select_indices."""
cache = DynamicCache()
# Shape: (batch=3, heads=1, seq_len=2, head_dim=1)
prefill = torch.tensor(
[
[[[1.0], [2.0]]],
[[[10.0], [20.0]]],
[[[100.0], [200.0]]],
]
)
cache.update(prefill, prefill, 0)
self.assertEqual(cache.layers[0].keys.shape[0], 3)
# Keep batches 0 and 2
cache.batch_select_indices((0, 2))
self.assertEqual(cache.layers[0].keys.shape[0], 2)
self.assertEqual(
cache.layers[0].keys[:, 0, :, 0].tolist(),
[[1.0, 2.0], [100.0, 200.0]],
)
def test_hybrid_cache(self):
"""
Test hybrid StaticCache with a mix of static and sliding layers,
with prefill size bigger than sliding window.
prefill:
static: [1.0, 2.0, 3.0]
sliding: [10.0, 20.0, 30.0]
(stores only [20.0, 30.0])
update pos 4:
static: [1.0, 2.0, 3.0, 5.0]
sliding: [30.0, 50.0]
"""
config = copy.deepcopy(self.config)
config.num_hidden_layers = 2
config.layer_types = ["full_attention", "sliding_attention"]
config.sliding_window = 2
hybrid_cache = StaticCache(config=config, max_cache_len=self.max_cache_len)
# Prefill both layers up to cache capacity
prefill_static = torch.tensor([1.0, 2.0, 3.0])[None, None, :, None]
# Sliding window is 2, so it should return full [10.0, 20.0, 30.0], but store only [20.0, 30.0]
prefill_sliding = torch.tensor([10.0, 20.0, 30.0])[None, None, :, None]
# Update static layer (layer 0)
res_static = hybrid_cache.update(
key_states=prefill_static,
value_states=prefill_static,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(3)},
)
# Update sliding layer (layer 1)
res_sliding = hybrid_cache.update(
key_states=prefill_sliding,
value_states=prefill_sliding,
layer_idx=1,
cache_kwargs={"cache_position": torch.arange(3), "sliding_window": self.window_size},
)
# Verify initial states
self.assertEqual(
hybrid_cache.layers[0].keys[0, 0, :, 0].tolist(),
[1.0, 2.0, 3.0, 0.0],
"Initial static layer state is wrong",
)
self.assertEqual(
res_static[0][0, 0, :, 0].tolist(),
[1.0, 2.0, 3.0, 0.0],
"Static layer did not return the correct value.",
)
self.assertEqual(
hybrid_cache.layers[1].keys[0, 0, :, 0].tolist(),
[20.0, 30.0],
"Initial sliding layer state is wrong",
)
self.assertEqual(
res_sliding[0][0, 0, :, 0].tolist(),
[10.0, 20.0, 30.0],
"Sliding layer did not return the correct value.",
)
# Update at position 4
new_key_static = torch.tensor(5.0)[None, None, None, None]
new_key_sliding = torch.tensor(50.0)[None, None, None, None]
# Update static layer (layer 0)
hybrid_cache.update(
key_states=new_key_static,
value_states=new_key_static,
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([3])},
)
# Update sliding layer (layer 1)
hybrid_cache.update(
key_states=new_key_sliding,
value_states=new_key_sliding,
layer_idx=1,
cache_kwargs={"cache_position": torch.tensor([3])},
)
# The static layer does not slide, so it should have updated the element at position 3
self.assertEqual(
hybrid_cache.layers[0].keys[0, 0, :, 0].tolist(),
[1.0, 2.0, 3.0, 5.0],
"Static layer did not update as expected.",
)
# The sliding layer should have shifted, discarding the first element and adding the new one at the end
self.assertEqual(
hybrid_cache.layers[1].keys[0, 0, :, 0].tolist(),
[30.0, 50.0],
"Sliding layer did not slide as expected.",
)
def test_hybrid_chunked_cache(self):
"""
Test hybrid chunked StaticCache with both static and sliding layers and special cases:
1. a pre-fill longer than the sliding window
2. a single-token decoding step (normal generation)
3. a multi-token decoding step after the window is already full
Sliding-window size: 2
Static layer is full-attention.
─────────────────────────────────────────────
Prefill:
static : [1, 2, 3]
sliding : [10, 20, 30] (cache keeps [20, 30])
+1 token:
static : [1, 2, 3, 5]
sliding : [30, 50] (returned [30, 50])
+2 tokens:
sliding : [60, 70] (returned [50, 60, 70])
"""
config = copy.deepcopy(self.config)
config.num_hidden_layers = 2
config.layer_types = ["full_attention", "chunked_attention"]
config.attention_chunk_size = 2
config.sliding_window = None
max_cache_len = 4
chunked_cache = StaticCache(config=config, max_cache_len=max_cache_len)
# 1) PREFILL (3 tokens > sliding_window)
prefill_static = torch.tensor([1.0, 2.0, 3.0])[None, None, :, None]
prefill_sliding = torch.tensor([10.0, 20.0, 30.0])[None, None, :, None]
res_static = chunked_cache.update(
key_states=prefill_static,
value_states=prefill_static,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(3)},
)
res_sliding = chunked_cache.update(
key_states=prefill_sliding,
value_states=prefill_sliding,
layer_idx=1,
cache_kwargs={"cache_position": torch.arange(3)},
)
# Static layer keeps everything
self.assertEqual(res_static[0][0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 0.0])
# Sliding layer returned full prompt but stored the tail
self.assertEqual(res_sliding[0][0, 0, :, 0].tolist(), [10.0, 20.0, 30.0])
self.assertEqual(chunked_cache.layers[1].keys[0, 0, :, 0].tolist(), [20.0, 30.0])
# 2) ONE-TOKEN UPDATE (normal decode)
new_static = torch.tensor(5.0)[None, None, None, None]
new_sliding = torch.tensor(50.0)[None, None, None, None]
chunked_cache.update(
key_states=new_static,
value_states=new_static,
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([3])},
)
res_one = chunked_cache.update(
key_states=new_sliding,
value_states=new_sliding,
layer_idx=1,
cache_kwargs={"cache_position": torch.tensor([3])},
)
self.assertEqual(chunked_cache.layers[0].keys[0, 0, :, 0].tolist(), [1.0, 2.0, 3.0, 5.0])
self.assertEqual(chunked_cache.layers[1].keys[0, 0, :, 0].tolist(), [30.0, 50.0])
self.assertEqual(res_one[0][0, 0, :, 0].tolist(), [30.0, 50.0])
# 3) TWO-TOKEN UPDATE after window is full
new_sliding_2 = torch.tensor([60.0, 70.0])[None, None, :, None]
res_two = chunked_cache.update(
key_states=new_sliding_2,
value_states=new_sliding_2,
layer_idx=1,
cache_kwargs={"cache_position": torch.tensor([4, 5])}, # arbitrary positions; ignored in full mode
)
# Cache now keeps the latest two tokens
self.assertEqual(chunked_cache.layers[1].keys[0, 0, :, 0].tolist(), [60.0, 70.0])
# Returned tensor contains previous last token + new ones
self.assertEqual(res_two[0][0, 0, :, 0].tolist(), [50.0, 60.0, 70.0])
def test_hybrid_chunked_cache_extra_cases(self):
"""
Covers the new cases that appear on prefill chunking:
1) Not full multi-token update (cache_position[0] + update_len <= max_cache_len)
2) Multi-token update crossing the window (cache_position[0] < max_cache_len and cache_position[0] + update_len > max_cache_len)
Single sliding layer, max_cache_len = 3.
        Step 0 (prefill 2 tokens, update_len < max_cache_len)
cache = [10, 20, 0] returned [10, 20, 0]
Step 1 (add 2 tokens, p = 2, update_len = 2, p + update_len = 4 > max_cache_len)
cache = [20, 30, 40] returned [10, 20, 30, 40]
"""
config = copy.deepcopy(self.config)
config.num_hidden_layers = 1
config.layer_types = ["chunked_attention"]
config.sliding_window = None
config.attention_chunk_size = 3
cache = StaticCache(config=config, max_cache_len=3)
# Step 0 : multi-token prefill
first_chunk = torch.tensor([10.0, 20.0])[None, None, :, None] # L = 2
returned_0 = cache.update(
key_states=first_chunk,
value_states=first_chunk,
layer_idx=0,
cache_kwargs={"cache_position": torch.arange(2)}, # p = 0,1
)
# internal cache should have first two tokens and a zero pad
self.assertEqual(cache.layers[0].keys[0, 0, :, 0].tolist(), [10.0, 20.0, 0.0])
self.assertEqual(returned_0[0][0, 0, :, 0].tolist(), [10.0, 20.0, 0.0])
# Step 1 : multi-token update crossing the window boundary
second_chunk = torch.tensor([30.0, 40.0])[None, None, :, None] # L = 2
returned_1 = cache.update(
key_states=second_chunk,
value_states=second_chunk,
layer_idx=0,
cache_kwargs={"cache_position": torch.tensor([2, 3])}, # p = 2
)
self.assertEqual(cache.layers[0].keys[0, 0, :, 0].tolist(), [20.0, 30.0, 40.0])
self.assertEqual(returned_1[0][0, 0, :, 0].tolist(), [10.0, 20.0, 30.0, 40.0])
| transformers/tests/utils/test_cache_utils.py/0 | {
"file_path": "transformers/tests/utils/test_cache_utils.py",
"repo_id": "transformers",
"token_count": 26311
} | 582 |
import os
import unittest
from pathlib import Path
from typing import Callable
import pytest
from transformers.utils.import_utils import (
Backend,
VersionComparison,
define_import_structure,
spread_import_structure,
)
import_structures = Path(__file__).parent / "import_structures"
def fetch__all__(file_content):
"""
Returns the content of the __all__ variable in the file content.
Returns None if not defined, otherwise returns a list of strings.
"""
lines = file_content.split("\n")
for line_index in range(len(lines)):
line = lines[line_index]
if line.startswith("__all__ = "):
# __all__ is defined on a single line
if line.endswith("]"):
return [obj.strip("\"' ") for obj in line.split("=")[1].strip(" []").split(",")]
# __all__ is defined on multiple lines
else:
_all = []
for __all__line_index in range(line_index + 1, len(lines)):
if lines[__all__line_index].strip() == "]":
return _all
else:
_all.append(lines[__all__line_index].strip("\"', "))
class TestImportStructures(unittest.TestCase):
base_transformers_path = Path(__file__).parent.parent.parent
models_path = base_transformers_path / "src" / "transformers" / "models"
models_import_structure = spread_import_structure(define_import_structure(models_path))
def test_definition(self):
import_structure = define_import_structure(import_structures)
        valid_frozensets: dict[frozenset[str], dict[str, set[str]]] = {
frozenset(): {
"import_structure_raw_register": {"A0", "A4", "a0"},
"import_structure_register_with_comments": {"B0", "b0"},
},
frozenset({"random_item_that_should_not_exist"}): {"failing_export": {"A0"}},
frozenset({"torch"}): {
"import_structure_register_with_duplicates": {"C0", "C1", "C2", "C3", "c0", "c1", "c2", "c3"}
},
frozenset({"tf", "torch"}): {
"import_structure_raw_register": {"A1", "A2", "A3", "a1", "a2", "a3"},
"import_structure_register_with_comments": {"B1", "B2", "B3", "b1", "b2", "b3"},
},
frozenset({"torch>=2.5"}): {"import_structure_raw_register_with_versions": {"D0", "d0"}},
frozenset({"torch>2.5"}): {"import_structure_raw_register_with_versions": {"D1", "d1"}},
frozenset({"torch<=2.5"}): {"import_structure_raw_register_with_versions": {"D2", "d2"}},
frozenset({"torch<2.5"}): {"import_structure_raw_register_with_versions": {"D3", "d3"}},
frozenset({"torch==2.5"}): {"import_structure_raw_register_with_versions": {"D4", "d4"}},
frozenset({"torch!=2.5"}): {"import_structure_raw_register_with_versions": {"D5", "d5"}},
frozenset({"torch>=2.5", "accelerate<0.20"}): {
"import_structure_raw_register_with_versions": {"D6", "d6"}
},
}
self.assertEqual(len(import_structure.keys()), len(valid_frozensets.keys()))
for _frozenset in valid_frozensets:
self.assertTrue(_frozenset in import_structure)
self.assertListEqual(list(import_structure[_frozenset].keys()), list(valid_frozensets[_frozenset].keys()))
for module, objects in valid_frozensets[_frozenset].items():
self.assertTrue(module in import_structure[_frozenset])
self.assertSetEqual(objects, import_structure[_frozenset][module])
def test_transformers_specific_model_import(self):
"""
This test ensures that there is equivalence between what is written down in __all__ and what is
written down with register().
It doesn't test the backends attributed to register().
"""
for architecture in os.listdir(self.models_path):
if (
os.path.isfile(self.models_path / architecture)
or architecture.startswith("_")
or architecture == "deprecated"
):
continue
with self.subTest(f"Testing arch {architecture}"):
import_structure = define_import_structure(self.models_path / architecture)
backend_agnostic_import_structure = {}
for module_object_mapping in import_structure.values():
for module, objects in module_object_mapping.items():
if module not in backend_agnostic_import_structure:
backend_agnostic_import_structure[module] = []
backend_agnostic_import_structure[module].extend(objects)
for module, objects in backend_agnostic_import_structure.items():
with open(self.models_path / architecture / f"{module}.py") as f:
content = f.read()
_all = fetch__all__(content)
if _all is None:
raise ValueError(f"{module} doesn't have __all__ defined.")
error_message = (
f"self.models_path / architecture / f'{module}.py doesn't seem to be defined correctly:\n"
f"Defined in __all__: {sorted(_all)}\nDefined with register: {sorted(objects)}"
)
self.assertListEqual(sorted(objects), sorted(_all), msg=error_message)
def test_import_spread(self):
"""
This test is specifically designed to test that varying levels of depth across import structures are
respected.
In this instance, frozensets are at respective depths of 1, 2 and 3, for example:
- models.{frozensets}
- models.albert.{frozensets}
- models.deprecated.transfo_xl.{frozensets}
"""
initial_import_structure = {
frozenset(): {"dummy_non_model": {"DummyObject"}},
"models": {
frozenset(): {"dummy_config": {"DummyConfig"}},
"albert": {
frozenset(): {"configuration_albert": {"AlbertConfig", "AlbertOnnxConfig"}},
frozenset({"torch"}): {
"modeling_albert": {
"AlbertForMaskedLM",
}
},
},
"llama": {
frozenset(): {"configuration_llama": {"LlamaConfig"}},
frozenset({"torch"}): {
"modeling_llama": {
"LlamaForCausalLM",
}
},
},
"deprecated": {
"transfo_xl": {
frozenset({"torch"}): {
"modeling_transfo_xl": {
"TransfoXLModel",
}
},
frozenset(): {
"configuration_transfo_xl": {"TransfoXLConfig"},
"tokenization_transfo_xl": {"TransfoXLCorpus", "TransfoXLTokenizer"},
},
},
"deta": {
frozenset({"torch"}): {
"modeling_deta": {"DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel"}
},
frozenset(): {"configuration_deta": {"DetaConfig"}},
frozenset({"vision"}): {"image_processing_deta": {"DetaImageProcessor"}},
},
},
},
}
ground_truth_spread_import_structure = {
frozenset(): {
"dummy_non_model": {"DummyObject"},
"models.dummy_config": {"DummyConfig"},
"models.albert.configuration_albert": {"AlbertConfig", "AlbertOnnxConfig"},
"models.llama.configuration_llama": {"LlamaConfig"},
"models.deprecated.transfo_xl.configuration_transfo_xl": {"TransfoXLConfig"},
"models.deprecated.transfo_xl.tokenization_transfo_xl": {"TransfoXLCorpus", "TransfoXLTokenizer"},
"models.deprecated.deta.configuration_deta": {"DetaConfig"},
},
frozenset({"torch"}): {
"models.albert.modeling_albert": {"AlbertForMaskedLM"},
"models.llama.modeling_llama": {"LlamaForCausalLM"},
"models.deprecated.transfo_xl.modeling_transfo_xl": {"TransfoXLModel"},
"models.deprecated.deta.modeling_deta": {"DetaForObjectDetection", "DetaModel", "DetaPreTrainedModel"},
},
frozenset({"vision"}): {"models.deprecated.deta.image_processing_deta": {"DetaImageProcessor"}},
}
newly_spread_import_structure = spread_import_structure(initial_import_structure)
self.assertEqual(ground_truth_spread_import_structure, newly_spread_import_structure)
@pytest.mark.parametrize(
"backend,package_name,version_comparison,version",
[
pytest.param(Backend("torch>=2.5 "), "torch", VersionComparison.GREATER_THAN_OR_EQUAL.value, "2.5"),
pytest.param(Backend("tf<=1"), "tf", VersionComparison.LESS_THAN_OR_EQUAL.value, "1"),
pytest.param(Backend("torchvision==0.19.1"), "torchvision", VersionComparison.EQUAL.value, "0.19.1"),
],
)
def test_backend_specification(backend: Backend, package_name: str, version_comparison: Callable, version: str):
assert backend.package_name == package_name
assert VersionComparison.from_string(backend.version_comparison) == version_comparison
assert backend.version == version
| transformers/tests/utils/test_import_structure.py/0 | {
"file_path": "transformers/tests/utils/test_import_structure.py",
"repo_id": "transformers",
"token_count": 4929
} | 583 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to add and/or update the attribute `pipeline_model_mapping` in model test files.
This script will be (mostly) used in the following 2 situations:
- run within a (scheduled) CI job to:
- check if model test files in the library have updated `pipeline_model_mapping`,
- and/or update test files and (possibly) open a GitHub pull request automatically
- being run by a `transformers` member to quickly check and update some particular test file(s)
This script is **NOT** intended to be run (manually) by community contributors.
"""
import argparse
import glob
import inspect
import os
import re
import unittest
from get_test_info import get_test_classes
from tests.test_pipeline_mixin import pipeline_test_mapping
PIPELINE_TEST_MAPPING = {}
for task in pipeline_test_mapping:
PIPELINE_TEST_MAPPING[task] = {"pt": None, "tf": None}
# DO **NOT** add item to this set (unless the reason is approved)
TEST_FILE_TO_IGNORE = {
"tests/models/esm/test_modeling_esmfold.py", # The pipeline test mapping is added to `test_modeling_esm.py`
}
def get_framework(test_class):
"""Infer the framework from the test class `test_class`."""
if "ModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
return "pt"
elif "TFModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
return "tf"
elif "FlaxModelTesterMixin" in [x.__name__ for x in test_class.__bases__]:
return "flax"
else:
return None
def get_mapping_for_task(task, framework):
"""Get mappings defined in `XXXPipelineTests` for the task `task`."""
# Use the cached results
if PIPELINE_TEST_MAPPING[task].get(framework, None) is not None:
return PIPELINE_TEST_MAPPING[task][framework]
pipeline_test_class = pipeline_test_mapping[task]["test"]
mapping = None
if framework == "pt":
mapping = getattr(pipeline_test_class, "model_mapping", None)
elif framework == "tf":
mapping = getattr(pipeline_test_class, "tf_model_mapping", None)
if mapping is not None:
mapping = dict(mapping.items())
# cache the results
PIPELINE_TEST_MAPPING[task][framework] = mapping
return mapping
def get_model_for_pipeline_test(test_class, task):
"""Get the model architecture(s) related to the test class `test_class` for a pipeline `task`."""
framework = get_framework(test_class)
if framework is None:
return None
mapping = get_mapping_for_task(task, framework)
if mapping is None:
return None
config_classes = list({model_class.config_class for model_class in test_class.all_model_classes})
if len(config_classes) != 1:
raise ValueError("There should be exactly one configuration class from `test_class.all_model_classes`.")
# This could be a list/tuple of model classes, but it's rare.
model_class = mapping.get(config_classes[0], None)
if isinstance(model_class, (tuple, list)):
model_class = sorted(model_class, key=lambda x: x.__name__)
return model_class
def get_pipeline_model_mapping(test_class):
"""Get `pipeline_model_mapping` for `test_class`."""
mapping = [(task, get_model_for_pipeline_test(test_class, task)) for task in pipeline_test_mapping]
mapping = sorted([(task, model) for task, model in mapping if model is not None], key=lambda x: x[0])
return dict(mapping)
def get_pipeline_model_mapping_string(test_class):
"""Get `pipeline_model_mapping` for `test_class` as a string (to be added to the test file).
This will be a 1-line string. After this is added to a test file, `make style` will format it beautifully.
"""
framework = get_framework(test_class)
if framework == "pt":
framework = "torch"
default_value = "{}"
mapping = get_pipeline_model_mapping(test_class)
if len(mapping) == 0:
return ""
texts = []
for task, model_classes in mapping.items():
if isinstance(model_classes, (tuple, list)):
# A list/tuple of model classes
value = "(" + ", ".join([x.__name__ for x in model_classes]) + ")"
else:
# A single model class
value = model_classes.__name__
texts.append(f'"{task}": {value}')
text = "{" + ", ".join(texts) + "}"
text = f"pipeline_model_mapping = {text} if is_{framework}_available() else {default_value}"
return text
def is_valid_test_class(test_class):
"""Restrict to `XXXModelTesterMixin` and should be a subclass of `unittest.TestCase`."""
base_class_names = {"ModelTesterMixin", "TFModelTesterMixin", "FlaxModelTesterMixin"}
if not issubclass(test_class, unittest.TestCase):
return False
return len(base_class_names.intersection([x.__name__ for x in test_class.__bases__])) > 0
def find_test_class(test_file):
"""Find a test class in `test_file` to which we will add `pipeline_model_mapping`."""
test_classes = [x for x in get_test_classes(test_file) if is_valid_test_class(x)]
target_test_class = None
for test_class in test_classes:
# If a test class has defined `pipeline_model_mapping`, let's take it
if getattr(test_class, "pipeline_model_mapping", None) is not None:
target_test_class = test_class
break
# Take the test class with the shortest name (just a heuristic)
if target_test_class is None and len(test_classes) > 0:
target_test_class = sorted(test_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
return target_test_class
def find_block_ending(lines, start_idx, indent_level):
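    """Return the index (inclusive) of the last line of the block that starts at `start_idx`.
    A line belongs to the block while it is indented deeper than `indent_level`; a lone ")" at
    `indent_level` still closes (and belongs to) the block.
    """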
end_idx = start_idx
for idx, line in enumerate(lines[start_idx:]):
indent = len(line) - len(line.lstrip())
if idx == 0 or indent > indent_level or (indent == indent_level and line.strip() == ")"):
end_idx = start_idx + idx
elif idx > 0 and indent <= indent_level:
# Outside the definition block of `pipeline_model_mapping`
break
return end_idx
def add_pipeline_model_mapping(test_class, overwrite=False):
"""Add `pipeline_model_mapping` to `test_class`."""
if getattr(test_class, "pipeline_model_mapping", None) is not None:
if not overwrite:
return "", -1
line_to_add = get_pipeline_model_mapping_string(test_class)
if len(line_to_add) == 0:
return "", -1
line_to_add = line_to_add + "\n"
# The code defined the class `test_class`
class_lines, class_start_line_no = inspect.getsourcelines(test_class)
# `inspect` gives the code for an object, including decorator(s) if any.
# We (only) need the exact line of the class definition.
for idx, line in enumerate(class_lines):
if line.lstrip().startswith("class "):
class_lines = class_lines[idx:]
class_start_line_no += idx
break
class_end_line_no = class_start_line_no + len(class_lines) - 1
# The index in `class_lines` that starts the definition of `all_model_classes`, `all_generative_model_classes` or
# `pipeline_model_mapping`. This assumes they are defined in such order, and we take the start index of the last
# block that appears in a `test_class`.
start_idx = None
# The indent level of the line at `class_lines[start_idx]` (if defined)
indent_level = 0
# To record if `pipeline_model_mapping` is found in `test_class`.
def_line = None
for idx, line in enumerate(class_lines):
if line.strip().startswith("all_model_classes = "):
indent_level = len(line) - len(line.lstrip())
start_idx = idx
elif line.strip().startswith("all_generative_model_classes = "):
indent_level = len(line) - len(line.lstrip())
start_idx = idx
elif line.strip().startswith("pipeline_model_mapping = "):
indent_level = len(line) - len(line.lstrip())
start_idx = idx
def_line = line
break
if start_idx is None:
return "", -1
# Find the ending index (inclusive) of the above found block.
end_idx = find_block_ending(class_lines, start_idx, indent_level)
# Extract `is_xxx_available()` from existing blocks: some models require specific libraries like `timm` and use
# `is_timm_available()` instead of `is_torch_available()`.
# Keep leading and trailing whitespaces
r = re.compile(r"\s(is_\S+?_available\(\))\s")
for line in class_lines[start_idx : end_idx + 1]:
backend_condition = r.search(line)
if backend_condition is not None:
# replace the leading and trailing whitespaces to the space character " ".
target = " " + backend_condition[0][1:-1] + " "
line_to_add = r.sub(target, line_to_add)
break
if def_line is None:
# `pipeline_model_mapping` is not defined. The target index is set to the ending index (inclusive) of
# `all_model_classes` or `all_generative_model_classes`.
target_idx = end_idx
else:
# `pipeline_model_mapping` is defined. The target index is set to be one **BEFORE** its start index.
target_idx = start_idx - 1
# mark the lines of the currently existing `pipeline_model_mapping` to be removed.
for idx in range(start_idx, end_idx + 1):
# These lines are going to be removed before writing to the test file.
class_lines[idx] = None # noqa
# Make sure the test class is a subclass of `PipelineTesterMixin`.
parent_classes = [x.__name__ for x in test_class.__bases__]
if "PipelineTesterMixin" not in parent_classes:
# Put `PipelineTesterMixin` just before `unittest.TestCase`
_parent_classes = [x for x in parent_classes if x != "TestCase"] + ["PipelineTesterMixin"]
if "TestCase" in parent_classes:
# Here we **assume** the original string is always with `unittest.TestCase`.
_parent_classes.append("unittest.TestCase")
parent_classes = ", ".join(_parent_classes)
for idx, line in enumerate(class_lines):
# Find the ending of the declaration of `test_class`
if line.strip().endswith("):"):
# mark the lines of the declaration of `test_class` to be removed
for _idx in range(idx + 1):
class_lines[_idx] = None # noqa
break
# Add the new, one-line, class declaration for `test_class`
class_lines[0] = f"class {test_class.__name__}({parent_classes}):\n"
# Add indentation
line_to_add = " " * indent_level + line_to_add
# Insert `pipeline_model_mapping` to `class_lines`.
# (The line at `target_idx` should be kept by definition!)
class_lines = class_lines[: target_idx + 1] + [line_to_add] + class_lines[target_idx + 1 :]
# Remove the lines that are marked to be removed
class_lines = [x for x in class_lines if x is not None]
# Move from test class to module (in order to write to the test file)
module_lines = inspect.getsourcelines(inspect.getmodule(test_class))[0]
# Be careful with the 1-off between line numbers and array indices
module_lines = module_lines[: class_start_line_no - 1] + class_lines + module_lines[class_end_line_no:]
code = "".join(module_lines)
    module_file = inspect.getsourcefile(test_class)
    with open(module_file, "w", encoding="UTF-8", newline="\n") as fp:
fp.write(code)
return line_to_add
def add_pipeline_model_mapping_to_test_file(test_file, overwrite=False):
"""Add `pipeline_model_mapping` to `test_file`."""
test_class = find_test_class(test_file)
if test_class:
add_pipeline_model_mapping(test_class, overwrite=overwrite)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--test_file", type=str, help="A path to the test file, starting with the repository's `tests` directory."
)
parser.add_argument(
"--all",
action="store_true",
help="If to check and modify all test files.",
)
parser.add_argument(
"--overwrite",
action="store_true",
help="If to overwrite a test class if it has already defined `pipeline_model_mapping`.",
)
args = parser.parse_args()
if not args.all and not args.test_file:
raise ValueError("Please specify either `test_file` or pass `--all` to check/modify all test files.")
elif args.all and args.test_file:
raise ValueError("Only one of `--test_file` and `--all` could be specified.")
test_files = []
if args.test_file:
test_files = [args.test_file]
else:
pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
for test_file in glob.glob(pattern):
# `Flax` is not concerned at this moment
if not test_file.startswith("test_modeling_flax_"):
test_files.append(test_file)
for test_file in test_files:
if test_file in TEST_FILE_TO_IGNORE:
print(f"[SKIPPED] {test_file} is skipped as it is in `TEST_FILE_TO_IGNORE` in the file {__file__}.")
continue
add_pipeline_model_mapping_to_test_file(test_file, overwrite=args.overwrite)
| transformers/utils/add_pipeline_model_mapping_to_test.py/0 | {
"file_path": "transformers/utils/add_pipeline_model_mapping_to_test.py",
"repo_id": "transformers",
"token_count": 5407
} | 584 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
saved_model = SavedModel()
onnx_ops = []
with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
onnx_opsets = json.load(f)["opsets"]
for i in range(1, opset + 1):
onnx_ops.extend(onnx_opsets[str(i)])
with open(saved_model_path, "rb") as f:
saved_model.ParseFromString(f.read())
model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node)
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def)
    # Convert to a sorted list
model_op_names = sorted(model_op_names)
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op)
if strict and len(incompatible_ops) > 0:
raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops)
elif len(incompatible_ops) > 0:
print(f"Found the following incompatible ops for the opset {opset}:")
print(*incompatible_ops, sep="\n")
else:
print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| transformers/utils/check_tf_ops.py/0 | {
"file_path": "transformers/utils/check_tf_ops.py",
"repo_id": "transformers",
"token_count": 1302
} | 585 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to get a map containing the information of runners to use in GitHub Actions workflow files.
This is meant to be a temporary file that helps us to switch progressively from T4 to A10 runners.
The data is stored in a Hub repository [hf-internal-testing/transformers_daily_ci](https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/blob/main/runner_map.json).
Currently, in that file, we specify the models for which we want to run the tests with T4 runners to avoid many test failures showing up in the CI reports.
We will work on the tests so that A10 runners can eventually be used for all CI jobs.
"""
import os
import requests
if __name__ == "__main__":
# T4
t4_runners = {
"single-gpu": "aws-g4dn-4xlarge-cache",
"multi-gpu": "aws-g4dn-12xlarge-cache",
}
# A10
a10_runners = {
"single-gpu": "aws-g5-4xlarge-cache",
"multi-gpu": "aws-g5-12xlarge-cache",
}
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
d = d2 + d1
response = requests.get(
"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/resolve/main/runner_map.json"
)
# The models that we want to run with T4 runners
jobs_using_t4 = response.json()
runner_map = {}
for key in d:
modified_key = key
if modified_key.startswith("models/"):
modified_key = key[len("models/") :]
if modified_key in jobs_using_t4:
runner_map[key] = t4_runners
else:
runner_map[key] = a10_runners
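    # The resulting map looks like, e.g., {"models/bert": {"single-gpu": ..., "multi-gpu": ...}, "pipelines": {...}, ...}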
print(runner_map)
| transformers/utils/get_runner_map.py/0 | {
"file_path": "transformers/utils/get_runner_map.py",
"repo_id": "transformers",
"token_count": 858
} | 586 |
# coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
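"""
Scan the model test files for common (and generation) tests that are overridden or skipped, and dump the results to a
JSON file. The script is expected to be run from the root of the repo, e.g.:

```bash
# Aggregate the skip status of every common test across all models
python utils/scan_skipped_tests.py --output_dir reports

# Single-test mode (the test method name is just an example)
python utils/scan_skipped_tests.py --test_method_name test_attention_outputs
```
"""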
import argparse
import json
import re
from pathlib import Path
REPO_ROOT = Path().cwd()
COMMON_TEST_FILES: list[tuple[Path, str]] = [
(REPO_ROOT / "tests/test_modeling_common.py", "common"),
(REPO_ROOT / "tests/generation/test_utils.py", "GenerationMixin"),
]
MODELS_DIR = REPO_ROOT / "tests/models"
def get_common_tests(file_paths_with_origin: list[tuple[Path, str]]) -> dict[str, str]:
"""Extract all common test function names (e.g., 'test_forward')."""
tests_with_origin: dict[str, str] = {}
for file_path, origin_tag in file_paths_with_origin:
if not file_path.is_file():
continue
content = file_path.read_text(encoding="utf-8")
for test_name in re.findall(r"^\s*def\s+(test_[A-Za-z0-9_]+)", content, re.MULTILINE):
tests_with_origin[test_name] = origin_tag
return tests_with_origin
def get_models_and_test_files(models_dir: Path) -> tuple[list[str], list[Path]]:
if not models_dir.is_dir():
raise FileNotFoundError(f"Models directory not found at {models_dir}")
test_files: list[Path] = sorted(models_dir.rglob("test_modeling_*.py"))
model_names: list[str] = sorted({file_path.parent.name for file_path in test_files})
return model_names, test_files
def _extract_reason_from_decorators(decorators_block: str) -> str:
"""Extracts the reason string from a decorator block, if any."""
reason_match = re.search(r'reason\s*=\s*["\'](.*?)["\']', decorators_block)
if reason_match:
return reason_match.group(1)
reason_match = re.search(r'\((?:.*?,\s*)?["\'](.*?)["\']\)', decorators_block)
if reason_match:
return reason_match.group(1)
return decorators_block.strip().split("\n")[-1].strip()
def extract_test_info(file_content: str) -> dict[str, tuple[str, str]]:
"""
Parse a test file once and return a mapping of test functions to their
status and skip reason, e.g. {'test_forward': ('SKIPPED', 'too slow')}.
"""
result: dict[str, tuple[str, str]] = {}
pattern = re.compile(r"((?:^\s*@.*?\n)*?)^\s*def\s+(test_[A-Za-z0-9_]+)\b", re.MULTILINE)
for decorators_block, test_name in pattern.findall(file_content):
if "skip" in decorators_block:
result[test_name] = ("SKIPPED", _extract_reason_from_decorators(decorators_block))
else:
result[test_name] = ("RAN", "")
return result
def build_model_overrides(model_test_files: list[Path]) -> dict[str, dict[str, tuple[str, str]]]:
"""Return *model_name → {test_name → (status, reason)}* mapping."""
model_overrides: dict[str, dict[str, tuple[str, str]]] = {}
for file_path in model_test_files:
model_name = file_path.parent.name
file_content = file_path.read_text(encoding="utf-8")
model_overrides.setdefault(model_name, {}).update(extract_test_info(file_content))
return model_overrides
def save_json(obj: dict, output_path: Path) -> None:
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(json.dumps(obj, indent=2), encoding="utf-8")
def summarize_single_test(
test_name: str,
model_names: list[str],
model_overrides: dict[str, dict[str, tuple[str, str]]],
) -> dict[str, object]:
"""Print a concise terminal summary for *test_name* and return the raw data."""
models_ran, models_skipped, reasons_for_skipping = [], [], []
for model_name in model_names:
status, reason = model_overrides.get(model_name, {}).get(test_name, ("RAN", ""))
if status == "SKIPPED":
models_skipped.append(model_name)
reasons_for_skipping.append(f"{model_name}: {reason}")
else:
models_ran.append(model_name)
total_models = len(model_names)
skipped_ratio = len(models_skipped) / total_models if total_models else 0.0
print(f"\n== {test_name} ==")
print(f"Ran : {len(models_ran)}/{total_models}")
print(f"Skipped : {len(models_skipped)}/{total_models} ({skipped_ratio:.1%})")
for reason_entry in reasons_for_skipping[:10]:
print(f" - {reason_entry}")
if len(reasons_for_skipping) > 10:
print(" - ...")
return {
"models_ran": sorted(models_ran),
"models_skipped": sorted(models_skipped),
"skipped_proportion": round(skipped_ratio, 4),
"reasons_skipped": sorted(reasons_for_skipping),
}
def summarize_all_tests(
tests_with_origin: dict[str, str],
model_names: list[str],
model_overrides: dict[str, dict[str, tuple[str, str]]],
) -> dict[str, object]:
"""Return aggregated data for every discovered common test."""
results: dict[str, object] = {}
total_models = len(model_names)
test_names = list(tests_with_origin)
print(f"📝 Aggregating {len(test_names)} tests...")
for index, test_fn in enumerate(test_names, 1):
print(f" ({index}/{len(test_names)}) {test_fn}", end="\r")
models_ran, models_skipped, reasons_for_skipping = [], [], []
for model_name in model_names:
status, reason = model_overrides.get(model_name, {}).get(test_fn, ("RAN", ""))
if status == "SKIPPED":
models_skipped.append(model_name)
reasons_for_skipping.append(f"{model_name}: {reason}")
else:
models_ran.append(model_name)
skipped_ratio = len(models_skipped) / total_models if total_models else 0.0
results[test_fn] = {
"origin": tests_with_origin[test_fn],
"models_ran": sorted(models_ran),
"models_skipped": sorted(models_skipped),
"skipped_proportion": round(skipped_ratio, 4),
"reasons_skipped": sorted(reasons_for_skipping),
}
print("\n✅ Scan complete.")
return results
def main() -> None:
parser = argparse.ArgumentParser(
description="Scan model tests for overridden or skipped common or generat tests.",
)
parser.add_argument(
"--output_dir",
default=".",
help="Directory for JSON output (default: %(default)s)",
)
parser.add_argument(
"--test_method_name",
help="Scan only this test method (single‑test mode)",
)
args = parser.parse_args()
output_dir = Path(args.output_dir).expanduser()
test_method_name = args.test_method_name
tests_with_origin = get_common_tests(COMMON_TEST_FILES)
if test_method_name:
tests_with_origin = {test_method_name: tests_with_origin.get(test_method_name, "unknown")}
model_names, model_test_files = get_models_and_test_files(MODELS_DIR)
print(f"🔬 Parsing {len(model_test_files)} model test files once each...")
model_overrides = build_model_overrides(model_test_files)
if test_method_name:
data = summarize_single_test(test_method_name, model_names, model_overrides)
json_path = output_dir / f"scan_{test_method_name}.json"
else:
data = summarize_all_tests(tests_with_origin, model_names, model_overrides)
json_path = output_dir / "all_tests_scan_result.json"
save_json(data, json_path)
print(f"\n📄 JSON saved to {json_path.resolve()}")
if __name__ == "__main__":
main()
| transformers/utils/scan_skipped_tests.py/0 | {
"file_path": "transformers/utils/scan_skipped_tests.py",
"repo_id": "transformers",
"token_count": 3188
} | 587 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Welcome to tests_fetcher V2.
This util is designed to fetch tests to run on a PR so that only the tests impacted by the modifications are run, and
when too many models are being impacted, only run the tests of a subset of core models. It works like this.
Stage 1: Identify the modified files. For jobs that run on the main branch, it's just the diff with the last commit.
On a PR, this takes all the files from the branching point to the current commit (so all modifications in a PR, not
just the last commit) but excludes modifications that are on docstrings or comments only.
Stage 2: Extract the tests to run. This is done by looking at the imports in each module and test file: if module A
imports module B, then changing module B impacts module A, so the tests using module A should be run. We thus get the
dependencies of each module and then recursively build the 'reverse' map of dependencies to get all modules and tests
impacted by a given file. We then only keep the tests (and only the core model tests if there are too many modules).
Caveats:
- This module only filters tests by files (not individual tests) so it's better to have tests for different things
in different files.
- This module assumes inits are just importing things, not really building objects, so it's better to structure
  them this way and move object building into separate submodules.
Usage:
Base use to fetch the tests in a pull request
```bash
python utils/tests_fetcher.py
```
Base use to fetch the tests on the main branch (with diff from the last commit):
```bash
python utils/tests_fetcher.py --diff_with_last_commit
```
"""
import argparse
import collections
import glob
import json
import os
import re
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union
from git import Repo
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_EXAMPLES = PATH_TO_REPO / "examples"
PATH_TO_TRANFORMERS = PATH_TO_REPO / "src/transformers"
PATH_TO_TESTS = PATH_TO_REPO / "tests"
# The value is just a heuristic to determine if we `guess` all models are impacted.
# This variable has effect only if `filter_models=False`.
NUM_MODELS_TO_TRIGGER_FULL_CI = 30
# List here the models to always test.
IMPORTANT_MODELS = [
"auto",
"bert",
"gpt2",
"t5",
"modernbert",
"vit,clip",
"detr",
"table_transformer",
"got_ocr2",
"whisper",
"wav2vec2",
"qwen2_audio",
"speech_t5",
"csm",
"llama",
"gemma3",
"qwen2",
"mistral3",
"qwen2_5_vl",
"llava",
"smolvlm",
"internvl",
"gemma3n",
"gpt_oss",
"qwen2_5_omni",
]
@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
"""
Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.
Args:
repo (`git.Repo`): A git repository (for instance the Transformers repo).
commit_id (`str`): The commit reference to checkout inside the context manager.
"""
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
def clean_code(content: str) -> str:
"""
    Remove docstrings, empty lines and comments from some code (used to detect if a diff is real or only concerns
    comments or docstrings).
Args:
content (`str`): The code to clean
Returns:
`str`: The cleaned code.
"""
# We need to deactivate autoformatting here to write escaped triple quotes (we cannot use real triple quotes or
# this would mess up the result if this function applied to this particular file).
# fmt: off
# Remove docstrings by splitting on triple " then triple ':
splits = content.split('\"\"\"')
content = "".join(splits[::2])
splits = content.split("\'\'\'")
# fmt: on
content = "".join(splits[::2])
# Remove empty lines and comments
lines_to_keep = []
for line in content.split("\n"):
# remove anything that is after a # sign.
line = re.sub("#.*$", "", line)
# remove white lines
if len(line) != 0 and not line.isspace():
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def keep_doc_examples_only(content: str) -> str:
"""
    Remove everything from the code content except the doc examples (used to determine whether a diff should trigger doc
tests or not).
Args:
content (`str`): The code to clean
Returns:
`str`: The cleaned code.
"""
# Keep doc examples only by splitting on triple "`"
splits = content.split("```")
# Add leading and trailing "```" so the navigation is easier when compared to the original input `content`
content = "```" + "```".join(splits[1::2]) + "```"
# Remove empty lines and comments
lines_to_keep = []
for line in content.split("\n"):
# remove anything that is after a # sign.
line = re.sub("#.*$", "", line)
# remove white lines
if len(line) != 0 and not line.isspace():
lines_to_keep.append(line)
return "\n".join(lines_to_keep)
def get_all_tests() -> list[str]:
"""
Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using
parallelism. The split is:
- folders under `tests`: (`tokenization`, `pipelines`, etc) except the subfolder `models` is excluded.
- folders under `tests/models`: `bert`, `gpt2`, etc.
- test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
"""
# test folders/files directly under `tests` folder
tests = os.listdir(PATH_TO_TESTS)
tests = [f"tests/{f}" for f in tests if "__pycache__" not in f]
tests = sorted([f for f in tests if (PATH_TO_REPO / f).is_dir() or f.startswith("tests/test_")])
# model specific test folders
model_test_folders = os.listdir(PATH_TO_TESTS / "models")
model_test_folders = [f"tests/models/{f}" for f in model_test_folders if "__pycache__" not in f]
model_test_folders = sorted([f for f in model_test_folders if (PATH_TO_REPO / f).is_dir()])
tests.remove("tests/models")
# Sagemaker tests are not meant to be run on the CI.
if "tests/sagemaker" in tests:
tests.remove("tests/sagemaker")
tests = model_test_folders + tests
return tests
def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool:
"""
Check if the diff is only in docstrings (or comments and whitespace) in a filename.
Args:
repo (`git.Repo`): A git repository (for instance the Transformers repo).
branching_point (`str`): The commit reference of where to compare for the diff.
        filename (`str`): The filename where we want to know if the diff is only in docstrings/comments.
Returns:
`bool`: Whether the diff is docstring/comments only or not.
"""
folder = Path(repo.working_dir)
with checkout_commit(repo, branching_point):
with open(folder / filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(folder / filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = clean_code(old_content)
new_content_clean = clean_code(new_content)
return old_content_clean == new_content_clean
def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool:
"""
Check if the diff is only in code examples of the doc in a filename.
Args:
repo (`git.Repo`): A git repository (for instance the Transformers repo).
branching_point (`str`): The commit reference of where to compare for the diff.
        filename (`str`): The filename where we want to know if the diff is only in code examples.
Returns:
`bool`: Whether the diff is only in code examples of the doc or not.
"""
folder = Path(repo.working_dir)
with checkout_commit(repo, branching_point):
with open(folder / filename, "r", encoding="utf-8") as f:
old_content = f.read()
with open(folder / filename, "r", encoding="utf-8") as f:
new_content = f.read()
old_content_clean = keep_doc_examples_only(old_content)
new_content_clean = keep_doc_examples_only(new_content)
return old_content_clean != new_content_clean
def get_impacted_files_from_tiny_model_summary(diff_with_last_commit: bool = False) -> list[str]:
"""
Return a list of python modeling files that are impacted by the changes of `tiny_model_summary.json` in between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
Returns:
`List[str]`: The list of Python modeling files that are impacted by the changes of `tiny_model_summary.json`.
"""
repo = Repo(PATH_TO_REPO)
folder = Path(repo.working_dir)
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
commits = repo.merge_base(repo.refs.main, repo.head)
for commit in commits:
print(f"Branching commit: {commit}")
else:
print(f"main is at {repo.head.commit}")
commits = repo.head.commit.parents
for commit in commits:
print(f"Parent commit: {commit}")
if not os.path.isfile(folder / "tests/utils/tiny_model_summary.json"):
return []
files = set()
for commit in commits:
with checkout_commit(repo, commit):
with open(folder / "tests/utils/tiny_model_summary.json", "r", encoding="utf-8") as f:
old_content = f.read()
with open(folder / "tests/utils/tiny_model_summary.json", "r", encoding="utf-8") as f:
new_content = f.read()
# get the content as json object
old_content = json.loads(old_content)
new_content = json.loads(new_content)
old_keys = set(old_content.keys())
new_keys = set(new_content.keys())
# get the difference
keys_with_diff = old_keys.symmetric_difference(new_keys)
common_keys = old_keys.intersection(new_keys)
# if both have the same key, check its content
for key in common_keys:
if old_content[key] != new_content[key]:
keys_with_diff.add(key)
# get the model classes
impacted_model_classes = []
for key in keys_with_diff:
if key in new_keys:
impacted_model_classes.extend(new_content[key]["model_classes"])
    # Add imports via `define_import_structure` after #35167, as explicit imports were removed from `__init__.py`
from transformers.utils.import_utils import define_import_structure
reversed_structure = {}
new_imported_modules_from_import_structure = define_import_structure("src/transformers/__init__.py")
for mapping in new_imported_modules_from_import_structure.values():
for _module, _imports in mapping.items():
for _import in _imports:
reversed_structure[_import] = _module
# Get the corresponding modeling file path
for model_class in impacted_model_classes:
module = reversed_structure[model_class]
framework = ""
if model_class.startswith("TF"):
framework = "tf"
elif model_class.startswith("Flax"):
framework = "flax"
fn = (
f"modeling_{module.split('.')[-1]}.py"
if framework == ""
else f"modeling_{framework}_{module.split('.')[-1]}.py"
)
files.add(f"src.transformers.{module}.{fn}".replace(".", os.path.sep).replace(f"{os.path.sep}py", ".py"))
return sorted(files)
def get_diff(repo: Repo, base_commit: str, commits: list[str]) -> list[str]:
"""
Get the diff between a base commit and one or several commits.
Args:
repo (`git.Repo`):
A git repository (for instance the Transformers repo).
base_commit (`str`):
The commit reference of where to compare for the diff. This is the current commit, not the branching point!
commits (`List[str]`):
The list of commits with which to compare the repo at `base_commit` (so the branching point).
Returns:
`List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
modified are returned if the diff in the file is not only in docstrings or comments, see
`diff_is_docstring_only`).
"""
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
# We always add new python files
if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
code_diff.append(diff_obj.b_path)
# We check that deleted python files won't break corresponding tests.
elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
code_diff.append(diff_obj.a_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications are in code and not docstrings.
if diff_is_docstring_only(repo, commit, diff_obj.b_path):
print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
else:
code_diff.append(diff_obj.a_path)
return code_diff
def get_modified_python_files(diff_with_last_commit: bool = False) -> list[str]:
"""
Return a list of python files that have been modified between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
Returns:
`List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
modified are returned if the diff in the file is not only in docstrings or comments, see
`diff_is_docstring_only`).
"""
repo = Repo(PATH_TO_REPO)
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
return get_diff(repo, repo.head.commit, branching_commits)
else:
print(f"main is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
return get_diff(repo, repo.head.commit, parent_commits)
def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: list[str]) -> list[str]:
"""
Get the diff in doc examples between a base commit and one or several commits.
Args:
repo (`git.Repo`):
A git repository (for instance the Transformers repo).
base_commit (`str`):
The commit reference of where to compare for the diff. This is the current commit, not the branching point!
commits (`List[str]`):
The list of commits with which to compare the repo at `base_commit` (so the branching point).
Returns:
`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files
modified are returned if the diff in the file is only in doctest examples).
"""
print("\n### DIFF ###\n")
code_diff = []
for commit in commits:
for diff_obj in commit.diff(base_commit):
# We only consider Python files and doc files.
if not diff_obj.b_path.endswith(".py") and not diff_obj.b_path.endswith(".md"):
continue
# We always add new python/md files
if diff_obj.change_type in ["A"]:
code_diff.append(diff_obj.b_path)
# Now for modified files
elif diff_obj.change_type in ["M", "R"]:
# In case of renames, we'll look at the tests using both the old and new name.
if diff_obj.a_path != diff_obj.b_path:
code_diff.extend([diff_obj.a_path, diff_obj.b_path])
else:
# Otherwise, we check modifications contain some doc example(s).
if diff_contains_doc_examples(repo, commit, diff_obj.b_path):
code_diff.append(diff_obj.a_path)
else:
print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.")
return code_diff
def get_all_doctest_files() -> list[str]:
"""
Return the complete list of python and Markdown files on which we run doctest.
At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`.
Returns:
`List[str]`: The complete list of Python and Markdown files on which we run doctest.
"""
py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.py")]
md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.md")]
test_files_to_run = py_files + md_files
# change to use "/" as path separator
test_files_to_run = ["/".join(Path(x).parts) for x in test_files_to_run]
# don't run doctest for files in `src/transformers/models/deprecated`
test_files_to_run = [x for x in test_files_to_run if "models/deprecated" not in x]
# only include files in `src` or `docs/source/en/`
test_files_to_run = [x for x in test_files_to_run if x.startswith(("src/", "docs/source/en/"))]
    # do not include init files
test_files_to_run = [x for x in test_files_to_run if not x.endswith(("__init__.py",))]
# These are files not doctested yet.
with open("utils/not_doctested.txt") as fp:
not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")}
# So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%.
test_files_to_run = [x for x in test_files_to_run if x not in not_doctested]
return sorted(test_files_to_run)
def get_new_doctest_files(repo, base_commit, branching_commit) -> list[str]:
"""
Get the list of files that were removed from "utils/not_doctested.txt", between `base_commit` and
`branching_commit`.
Returns:
`List[str]`: List of files that were removed from "utils/not_doctested.txt".
"""
for diff_obj in branching_commit.diff(base_commit):
# Ignores all but the "utils/not_doctested.txt" file.
if diff_obj.a_path != "utils/not_doctested.txt":
continue
# Loads the two versions
folder = Path(repo.working_dir)
with checkout_commit(repo, branching_commit):
with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f:
old_content = f.read()
with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f:
new_content = f.read()
# Compute the removed lines and return them
removed_content = {x.split(" ")[0] for x in old_content.split("\n")} - {
x.split(" ")[0] for x in new_content.split("\n")
}
return sorted(removed_content)
return []
def get_doctest_files(diff_with_last_commit: bool = False) -> list[str]:
"""
    Return a list of python and Markdown files where doc examples have been modified between:
- the current head and the main branch if `diff_with_last_commit=False` (default)
- the current head and its parent commit otherwise.
Returns:
`List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files
modified are returned if the diff in the file is only in doctest examples).
"""
repo = Repo(PATH_TO_REPO)
test_files_to_run = [] # noqa
if not diff_with_last_commit:
print(f"main is at {repo.refs.main.commit}")
print(f"Current head is at {repo.head.commit}")
branching_commits = repo.merge_base(repo.refs.main, repo.head)
for commit in branching_commits:
print(f"Branching commit: {commit}")
test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)
else:
print(f"main is at {repo.head.commit}")
parent_commits = repo.head.commit.parents
for commit in parent_commits:
print(f"Parent commit: {commit}")
test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)
all_test_files_to_run = get_all_doctest_files()
# Add to the test files to run any removed entry from "utils/not_doctested.txt".
new_test_files = get_new_doctest_files(repo, repo.head.commit, repo.refs.main.commit)
test_files_to_run = list(set(test_files_to_run + new_test_files))
# Do not run slow doctest tests on CircleCI
with open("utils/slow_documentation_tests.txt") as fp:
slow_documentation_tests = set(fp.read().strip().split("\n"))
test_files_to_run = [
x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests
]
# Make sure we did not end up with a test file that was removed
test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
return sorted(test_files_to_run)
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+([^\n]+) -> Line only contains from .xxx import yyy and we catch .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using findall on this re will only catch every
# other import.
_re_single_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+([^\n]+)(?=\n)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\) -> Line continues with from .xxx import (yyy) and we catch .xxx and yyy
# yyy will take multiple lines otherwise there wouldn't be parentheses.
_re_multi_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+([^\n]+) -> Line only contains from transformers.xxx import yyy and we catch
# .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using findall on this re will only catch every
# other import.
_re_single_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+([^\n]+)(?=\n)")
# (?:^|\n) -> Non-capturing group for the beginning of the doc or a new line.
# \s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\) -> Line continues with from transformers.xxx import (yyy) and we
# catch .xxx and yyy. yyy will take multiple lines otherwise there wouldn't be parentheses.
_re_multi_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\)")
def extract_imports(module_fname: str, cache: Optional[dict[str, list[str]]] = None) -> list[str]:
"""
Get the imports a given module makes.
Args:
module_fname (`str`):
The name of the file of the module where we want to look at the imports (given relative to the root of
the repo).
cache (Dictionary `str` to `List[str]`, *optional*):
To speed up this function if it was previously called on `module_fname`, the cache of all previously
computed results.
Returns:
`List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that
is a subfolder will give its init file).
"""
if cache is not None and module_fname in cache:
return cache[module_fname]
with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f:
content = f.read()
# Filter out all docstrings to not get imports in code examples. As before we need to deactivate formatting to
# keep this as escaped quotes and avoid this function failing on this file.
splits = content.split('\"\"\"') # fmt: skip
content = "".join(splits[::2])
module_parts = str(module_fname).split(os.path.sep)
imported_modules = []
# Let's start with relative imports
relative_imports = _re_single_line_relative_imports.findall(content)
relative_imports = [
(mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "("
]
multiline_relative_imports = _re_multi_line_relative_imports.findall(content)
relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp]
# We need to remove parts of the module name depending on the depth of the relative imports.
for module, imports in relative_imports:
level = 0
while module.startswith("."):
module = module[1:]
level += 1
if len(module) > 0:
dep_parts = module_parts[: len(module_parts) - level] + module.split(".")
else:
dep_parts = module_parts[: len(module_parts) - level]
imported_module = os.path.sep.join(dep_parts)
imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))
# Let's continue with direct imports
direct_imports = _re_single_line_direct_imports.findall(content)
direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("]
multiline_direct_imports = _re_multi_line_direct_imports.findall(content)
direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp]
# We need to find the relative path of those imports.
for module, imports in direct_imports:
import_parts = module.split(".")[1:] # ignore the name of the repo since we add it below.
dep_parts = ["src", "transformers"] + import_parts
imported_module = os.path.sep.join(dep_parts)
imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))
result = []
# Double check we get proper modules (either a python file or a folder with an init).
for module_file, imports in imported_modules:
if (PATH_TO_REPO / f"{module_file}.py").is_file():
module_file = f"{module_file}.py"
elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file():
module_file = os.path.sep.join([module_file, "__init__.py"])
imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)]
if len(imports) > 0:
result.append((module_file, imports))
if cache is not None:
cache[module_fname] = result
return result
def get_module_dependencies(module_fname: str, cache: Optional[dict[str, list[str]]] = None) -> list[str]:
"""
Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file
    has an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will traverse
the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py and utils/bar.py.
Warning: This presupposes that all intermediate inits are properly built (with imports from the respective
submodules) and work better if objects are defined in submodules and not the intermediate init (otherwise the
intermediate init is added, and inits usually have a lot of dependencies).
Args:
module_fname (`str`):
The name of the file of the module where we want to look at the imports (given relative to the root of
the repo).
cache (Dictionary `str` to `List[str]`, *optional*):
To speed up this function if it was previously called on `module_fname`, the cache of all previously
computed results.
Returns:
`List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports refined).
"""
dependencies = []
imported_modules = extract_imports(module_fname, cache=cache)
# The while loop is to recursively traverse all inits we may encounter: we will add things as we go.
while len(imported_modules) > 0:
new_modules = []
for module, imports in imported_modules:
if "models" in module.split("/") and module.split("/")[-1].startswith("convert_"):
continue
# If we end up in an __init__ we are often not actually importing from this init (except in the case where
# the object is fully defined in the __init__)
if module.endswith("__init__.py"):
# So we get the imports from that init then try to find where our objects come from.
new_imported_modules = dict(extract_imports(module, cache=cache))
                # Add imports via `define_import_structure` after #35167, as explicit imports were removed from `__init__.py`
from transformers.utils.import_utils import define_import_structure
new_imported_modules_from_import_structure = define_import_structure(PATH_TO_REPO / module)
for mapping in new_imported_modules_from_import_structure.values():
for _module, _imports in mapping.items():
# Import Structure returns _module keys as import paths rather than local paths
# We replace with os.path.sep so that it's Windows-compatible
_module = _module.replace(".", os.path.sep)
_module = module.replace("__init__.py", f"{_module}.py")
if _module not in new_imported_modules:
new_imported_modules[_module] = list(_imports)
else:
original_imports = new_imported_modules[_module]
for potential_new_item in list(_imports):
if potential_new_item not in original_imports:
new_imported_modules[_module].append(potential_new_item)
for new_module, new_imports in new_imported_modules.items():
if any(i in new_imports for i in imports):
if new_module not in dependencies:
new_modules.append((new_module, [i for i in new_imports if i in imports]))
imports = [i for i in imports if i not in new_imports]
if len(imports) > 0:
# If there are any objects lefts, they may be a submodule
path_to_module = PATH_TO_REPO / module.replace("__init__.py", "")
dependencies.extend(
[
os.path.join(module.replace("__init__.py", ""), f"{i}.py")
for i in imports
if (path_to_module / f"{i}.py").is_file()
]
)
imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()]
if len(imports) > 0:
# Then if there are still objects left, they are fully defined in the init, so we keep it as a
# dependency.
dependencies.append(module)
else:
dependencies.append(module)
imported_modules = new_modules
return dependencies
def create_reverse_dependency_tree() -> list[tuple[str, str]]:
"""
    Create a list of all edges (a, b) meaning that modifying a impacts b, with a and b going over all module and test files.
"""
cache = {}
all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py"))
all_modules = [x for x in all_modules if not ("models" in x.parts and x.parts[-1].startswith("convert_"))]
all_modules += list(PATH_TO_TESTS.glob("**/*.py"))
all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)]
return list(set(edges))
def get_tree_starting_at(module: str, edges: list[tuple[str, str]]) -> list[Union[str, list[str]]]:
"""
Returns the tree starting at a given module following all edges.
Args:
module (`str`): The module that will be the root of the subtree we want.
        edges (`List[Tuple[str, str]]`): The list of all edges of the tree.
Returns:
`List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges
starting at module], [list of edges starting at the preceding level], ...]
"""
vertices_seen = [module]
new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]]
tree = [module]
while len(new_edges) > 0:
tree.append(new_edges)
final_vertices = list({edge[1] for edge in new_edges})
vertices_seen.extend(final_vertices)
new_edges = [
edge
for edge in edges
if edge[0] in final_vertices and edge[1] not in vertices_seen and "__init__.py" not in edge[1]
]
return tree
def print_tree_deps_of(module, all_edges=None):
"""
Prints the tree of modules depending on a given module.
Args:
module (`str`): The module that will be the root of the subtree we want.
        all_edges (`List[Tuple[str, str]]`, *optional*):
The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed.
"""
if all_edges is None:
all_edges = create_reverse_dependency_tree()
tree = get_tree_starting_at(module, all_edges)
# The list of lines is a list of tuples (line_to_be_printed, module)
# Keeping the modules lets us know where to insert each new lines in the list.
lines = [(tree[0], tree[0])]
for index in range(1, len(tree)):
edges = tree[index]
start_edges = {edge[0] for edge in edges}
for start in start_edges:
end_edges = {edge[1] for edge in edges if edge[0] == start}
# We will insert all those edges just after the line showing start.
pos = 0
while lines[pos][1] != start:
pos += 1
lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :]
for line in lines:
        # We don't print the refs that were just here to help build lines.
print(line[0])
def init_test_examples_dependencies() -> tuple[dict[str, list[str]], list[str]]:
"""
The test examples do not import from the examples (which are just scripts, not modules) so we need some extra
care initializing the dependency map, which is the goal of this function. It initializes the dependency map for
example files by linking each example to the example test file for the example framework.
Returns:
`Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a
dict test example file to list of example files potentially tested by that test file, and the list of all
example files (to avoid recomputing it later).
"""
test_example_deps = {}
all_examples = []
for framework in ["flax", "pytorch", "tensorflow"]:
test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py"))
all_examples.extend(test_files)
        # Remove the files at the root of examples/framework since they are not proper examples (they are either utils
# or example test files).
examples = [
f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework
]
all_examples.extend(examples)
for test_file in test_files:
with open(test_file, "r", encoding="utf-8") as f:
content = f.read()
# Map all examples to the test files found in examples/framework.
test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [
str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content
]
# Also map the test files to themselves.
test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append(
str(test_file.relative_to(PATH_TO_REPO))
)
return test_example_deps, all_examples
def create_reverse_dependency_map() -> dict[str, list[str]]:
"""
Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively.
Returns:
`Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames
depending on it recursively. This way the tests impacted by a change in file A are the test files in the list
corresponding to key A in this result.
"""
cache = {}
# Start from the example deps init.
example_deps, examples = init_test_examples_dependencies()
# Add all modules and all tests to all examples
all_modules = list(PATH_TO_TRANFORMERS.glob("**/*.py"))
all_modules = [x for x in all_modules if not ("models" in x.parts and x.parts[-1].startswith("convert_"))]
all_modules += list(PATH_TO_TESTS.glob("**/*.py")) + examples
all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
# Compute the direct dependencies of all modules.
direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
direct_deps.update(example_deps)
# This recurses the dependencies
something_changed = True
while something_changed:
something_changed = False
for m in all_modules:
for d in direct_deps[m]:
                # We stop recursing at an init (because we always end up in the main init and we don't want to add all
                # files that the main init imports)
if d.endswith("__init__.py"):
continue
if d not in direct_deps:
raise ValueError(f"KeyError:{d}. From {m}")
new_deps = set(direct_deps[d]) - set(direct_deps[m])
if len(new_deps) > 0:
direct_deps[m].extend(list(new_deps))
something_changed = True
# Finally we can build the reverse map.
reverse_map = collections.defaultdict(list)
for m in all_modules:
for d in direct_deps[m]:
reverse_map[d].append(m)
# For inits, we don't do the reverse deps but the direct deps: if modifying an init, we want to make sure we test
# all the modules impacted by that init.
for m in [f for f in all_modules if f.endswith("__init__.py")]:
direct_deps = get_module_dependencies(m, cache=cache)
deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps)
reverse_map[m] = list(set(deps) - {m})
return reverse_map
def create_module_to_test_map(
reverse_map: Optional[dict[str, list[str]]] = None, filter_models: bool = False
) -> dict[str, list[str]]:
"""
Extract the tests from the reverse_dependency_map and potentially filters the model tests.
Args:
reverse_map (`Dict[str, List[str]]`, *optional*):
The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of
that function if not provided.
filter_models (`bool`, *optional*, defaults to `False`):
Whether or not to filter model tests to only include core models if a file impacts a lot of models.
Returns:
`Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified.
"""
if reverse_map is None:
reverse_map = create_reverse_dependency_map()
# Utility that tells us if a given file is a test (taking test examples into account)
def is_test(fname):
if fname.startswith("tests"):
return True
if fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test"):
return True
return False
# Build the test map
test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()}
if not filter_models:
return test_map
# Now we deal with the filtering if `filter_models` is True.
num_model_tests = len(list(PATH_TO_TESTS.glob("models/*")))
def has_many_models(tests):
# We filter to core models when a given file impacts more than half the model tests.
model_tests = {Path(t).parts[2] for t in tests if t.startswith("tests/models/")}
return len(model_tests) > num_model_tests // 2
# for each module (if specified in the argument `module`) of the form `models/my_model` (i.e. starting with it),
# we always keep the tests (those are already in the argument `tests`) which are in `tests/models/my_model`.
# This is to avoid them being excluded when a module has many impacted tests: the directly related test files should
# always be included!
def filter_tests(tests, module=""):
filtered_tests = []
for t in tests:
if (
not t.startswith("tests/models/")
or Path(t).parts[2] in IMPORTANT_MODELS
# at this point, `t` is of the form `tests/models/my_model`, and we check if `models/my_model`
# (i.e. `parts[1:3]`) is in `module`.
or "/".join(Path(t).parts[1:3]) in module
):
filtered_tests += [t]
return filtered_tests
return {
module: (filter_tests(tests, module=module) if has_many_models(tests) else tests)
for module, tests in test_map.items()
}
def _print_list(l) -> str:
"""
Pretty print a list of elements with one line per element and a - starting each line.
"""
return "\n".join([f"- {f}" for f in l])
def infer_tests_to_run(
output_file: str, diff_with_last_commit: bool = False, filter_models: bool = False, test_all: bool = False
):
"""
The main function called by the test fetcher. Determines the tests to run from the diff.
Args:
output_file (`str`):
The path where to store the summary of the test fetcher analysis. Other files will be stored in the same
folder:
- examples_test_list.txt: The list of examples tests to run.
- test_repo_utils.txt: Will indicate if the repo utils tests should be run or not.
- doctest_list.txt: The list of doctests to run.
diff_with_last_commit (`bool`, *optional*, defaults to `False`):
Whether to analyze the diff with the last commit (for use on the main branch after a PR is merged) or with
the branching point from main (for use on each PR).
        filter_models (`bool`, *optional*, defaults to `False`):
            Whether or not to filter the tests to core models only, when a modified file impacts a lot of model
            tests.
        test_all (`bool`, *optional*, defaults to `False`):
            Whether to fetch all the test files instead of inferring them from the diff.
"""
if not test_all:
modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
else:
modified_files = [str(k) for k in PATH_TO_TESTS.glob("*/*") if str(k).endswith(".py") and "test_" in str(k)]
print("\n### test_all is TRUE, FETCHING ALL FILES###\n")
print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}")
reverse_map = create_reverse_dependency_map()
impacted_files = modified_files.copy()
for f in modified_files:
if f in reverse_map:
impacted_files.extend(reverse_map[f])
# Remove duplicates
impacted_files = sorted(set(impacted_files))
print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}")
model_impacted = {"/".join(x.split("/")[:3]) for x in impacted_files if x.startswith("tests/models/")}
# Grab the corresponding test files:
if (
any(x in modified_files for x in ["setup.py", ".circleci/create_circleci_config.py"])
or not filter_models
and len(model_impacted) >= NUM_MODELS_TO_TRIGGER_FULL_CI
or commit_flags["test_all"]
):
test_files_to_run = glob.glob("tests/**/test_**.py", recursive=True) + glob.glob(
"examples/**/*.py", recursive=True
)
        if len(model_impacted) >= NUM_MODELS_TO_TRIGGER_FULL_CI and not filter_models:
print(
f"More than {NUM_MODELS_TO_TRIGGER_FULL_CI - 1} models are impacted and `filter_models=False`. CI is configured to test everything."
)
else:
# All modified tests need to be run.
test_files_to_run = [f for f in modified_files if f.startswith("tests") and "/test_" in f]
impacted_files = get_impacted_files_from_tiny_model_summary(diff_with_last_commit=diff_with_last_commit)
# Then we grab the corresponding test files.
test_map = create_module_to_test_map(reverse_map=reverse_map, filter_models=filter_models)
for f in modified_files + impacted_files:
if f in test_map:
test_files_to_run.extend(test_map[f])
test_files_to_run = sorted(set(test_files_to_run))
# Remove repo utils tests
test_files_to_run = [f for f in test_files_to_run if f.split(os.path.sep)[1] != "repo_utils"]
# Remove SageMaker tests
test_files_to_run = [f for f in test_files_to_run if f.split(os.path.sep)[1] != "sagemaker"]
# Make sure we did not end up with a test file that was removed
test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]
print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}")
create_test_list_from_filter(test_files_to_run, out_path="test_preparation/")
doctest_list = get_doctest_files()
print(f"\n### DOCTEST TO RUN ###\n{_print_list(doctest_list)}")
if len(doctest_list) > 0:
doctest_file = Path(output_file).parent / "doctest_list.txt"
with open(doctest_file, "w", encoding="utf-8") as f:
f.write(" ".join(doctest_list))
def filter_tests(output_file: str, filters: list[str]):
"""
Reads the content of the output file and filters out all the tests in a list of given folders.
Args:
output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher.
filters (`List[str]`): A list of folders to filter.
"""
if not os.path.isfile(output_file):
print("No test file found.")
return
with open(output_file, "r", encoding="utf-8") as f:
test_files = f.read().split(" ")
if len(test_files) == 0 or test_files == [""]:
print("No tests to filter.")
return
if test_files == ["tests"]:
test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py"] + filters]
else:
test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters]
with open(output_file, "w", encoding="utf-8") as f:
f.write(" ".join(test_files))
def parse_commit_message(commit_message: str) -> dict[str, bool]:
"""
Parses the commit message to detect if a command is there to skip, force all or part of the CI.
Args:
commit_message (`str`): The commit message of the current commit.
Returns:
        `Dict[str, bool]`: A dictionary of strings to bools with the following keys: `"skip"`,
        `"no_filter"` and `"test_all"`.
"""
if commit_message is None:
return {"skip": False, "no_filter": False, "test_all": False}
command_search = re.search(r"\[([^\]]*)\]", commit_message)
if command_search is not None:
command = command_search.groups()[0]
command = command.lower().replace("-", " ").replace("_", " ")
skip = command in ["ci skip", "skip ci", "circleci skip", "skip circleci"]
no_filter = set(command.split(" ")) == {"no", "filter"}
test_all = set(command.split(" ")) == {"test", "all"}
return {"skip": skip, "no_filter": no_filter, "test_all": test_all}
else:
return {"skip": False, "no_filter": False, "test_all": False}
JOB_TO_TEST_FILE = {
"tests_torch": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
"tests_generate": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
"tests_tokenization": r"tests/(?:models/.*/test_tokenization.*|test_tokenization_mistral_common\.py)",
"tests_processors": r"tests/models/.*/test_(?!(?:modeling_|tokenization_)).*", # takes feature extractors, image processors, processors
"examples_torch": r"examples/pytorch/.*test_.*",
"tests_exotic_models": r"tests/models/.*(?=layoutlmv|nat|deta|udop|nougat).*",
"tests_custom_tokenizers": r"tests/models/.*/test_tokenization_(?=bert_japanese|openai|clip).*",
# "repo_utils": r"tests/[^models].*test.*", TODO later on we might want to do
"pipelines_torch": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
"tests_hub": r"tests/.*",
"tests_onnx": r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*",
"tests_non_model": r"tests/[^/]*?/test_.*\.py",
}
def create_test_list_from_filter(full_test_list, out_path):
os.makedirs(out_path, exist_ok=True)
all_test_files = "\n".join(full_test_list)
for job_name, _filter in JOB_TO_TEST_FILE.items():
file_name = os.path.join(out_path, f"{job_name}_test_list.txt")
if job_name == "tests_hub":
files_to_test = ["tests"]
else:
files_to_test = list(re.findall(_filter, all_test_files))
print(job_name, file_name)
if len(files_to_test) > 0: # No tests -> no file with test list
with open(file_name, "w") as f:
f.write("\n".join(files_to_test))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run"
)
parser.add_argument(
"--json_output_file",
type=str,
default="test_map.json",
help="Where to store the tests to run in a dictionary format mapping test categories to test files",
)
parser.add_argument(
"--diff_with_last_commit",
action="store_true",
help="To fetch the tests between the current commit and the last commit",
)
parser.add_argument(
"--filter_tests",
action="store_true",
help="Will filter the pipeline/repo utils tests outside of the generated list of tests.",
)
parser.add_argument(
"--print_dependencies_of",
type=str,
help="Will only print the tree of modules depending on the file passed.",
default=None,
)
parser.add_argument(
"--fetch_all",
action="store_true",
help="Will fetch all tests.",
default=None,
)
args = parser.parse_args()
if args.print_dependencies_of is not None:
print_tree_deps_of(args.print_dependencies_of)
elif args.filter_tests:
filter_tests(args.output_file, ["pipelines", "repo_utils"])
else:
repo = Repo(PATH_TO_REPO)
commit_message = repo.head.commit.message
commit_flags = parse_commit_message(commit_message)
if commit_flags["skip"]:
print("Force-skipping the CI")
quit()
if commit_flags["no_filter"]:
print("Running all tests fetched without filtering.")
if args.fetch_all:
commit_flags["test_all"] = True
if commit_flags["test_all"]:
print("Force-launching all tests")
is_main_branch = not repo.head.is_detached and repo.head.ref == repo.refs.main
diff_with_last_commit = args.diff_with_last_commit
if not diff_with_last_commit and is_main_branch:
print("main branch detected, fetching tests against last commit.")
diff_with_last_commit = True
infer_tests_to_run(
args.output_file,
diff_with_last_commit=diff_with_last_commit,
filter_models=False,
test_all=commit_flags["test_all"],
)
filter_tests(args.output_file, ["repo_utils"])
| transformers/utils/tests_fetcher.py/0 | {
"file_path": "transformers/utils/tests_fetcher.py",
"repo_id": "transformers",
"token_count": 21690
} | 588 |
# BCO Trainer
[](https://huggingface.co/models?other=bco,trl)
TRL supports the Binary Classifier Optimization (BCO).
The [BCO](https://huggingface.co/papers/2404.04656) authors train a binary classifier whose logit serves as a reward so that the classifier maps {prompt, chosen completion} pairs to 1 and {prompt, rejected completion} pairs to 0.
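To make this concrete, here is a minimal sketch of the resulting objective (an illustration, not the [`BCOTrainer`] internals; the reward shift `delta`, a running statistic of the rewards in the paper, is taken here as a given scalar):

```py
import torch
import torch.nn.functional as F

def bco_loss(policy_logps, ref_logps, labels, beta=0.1, delta=0.0):
    # The implicit reward (the "classifier logit") is the scaled log-ratio of policy to reference.
    rewards = beta * (policy_logps - ref_logps) - delta
    # Binary cross-entropy: push desirable completions (label 1) towards 1, undesirable ones (label 0) towards 0.
    losses = torch.where(labels.bool(), -F.logsigmoid(rewards), -F.logsigmoid(-rewards))
    return losses.mean()
```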
For a full example have a look at [`examples/scripts/bco.py`].
## Expected dataset type
The [`BCOTrainer`] requires an [unpaired preference dataset](dataset_formats#unpaired-preference).
The [`BCOTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset formats. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset.
## Expected model format
The BCO trainer expects a model of `AutoModelForCausalLM`, compared to PPO that expects `AutoModelForCausalLMWithValueHead` for the value function.
## Using the `BCOTrainer`
For a detailed example, have a look at the `examples/scripts/bco.py` script. At a high level, we need to initialize the [`BCOTrainer`] with the `model` we wish to train and a reference `ref_model`, which is used to calculate the implicit rewards of the desirable and undesirable responses.
The `beta` hyperparameter controls the implicit reward, and the dataset must contain the three required entries (`prompt`, `completion`, and `label`). Note that `model` and `ref_model` need to have the same architecture (i.e., decoder-only or encoder-decoder).
```py
from trl import BCOConfig, BCOTrainer

training_args = BCOConfig(
    beta=0.1,
)

bco_trainer = BCOTrainer(
    model,
    ref_model,
    args=training_args,
    train_dataset=train_dataset,
    processing_class=tokenizer,
)
```
After this one can then call:
```py
bco_trainer.train()
```
## Underlying Distribution matching (UDM)
In practical scenarios, the thumbs-up and thumbs-down datasets are likely to have divergent underlying distributions of prompts.
Consider an LLM deployed for user feedback: if the model excels in writing tasks but underperforms in coding, the thumbs-up dataset will be dominated by writing-related prompts, while the thumbs-down dataset will contain mostly coding-related prompts.
If the prompts in your desired and undesired datasets differ a lot, it is useful to enable UDM.
Choose an embedding model and tokenizer:
```py
from functools import partial

from accelerate import Accelerator
from transformers import AutoModel, AutoTokenizer

embedding_model = AutoModel.from_pretrained(your_model_id)
embedding_tokenizer = AutoTokenizer.from_pretrained(your_model_id)

# customize this function depending on your embedding model
def embed_prompt(input_ids, attention_mask, model):
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    return outputs.last_hidden_state.mean(dim=1)

embedding_model = Accelerator().prepare_model(embedding_model)
embedding_func = partial(embed_prompt, model=embedding_model)
```
Set `prompt_sample_size` to define how many prompts are selected to train the UDM classifier and start the training with the provided embedding function:
```py
training_args = BCOConfig(
beta=0.1,
prompt_sample_size=512,
)
bco_trainer = BCOTrainer(
model,
    ref_model,
args=training_args,
train_dataset=train_dataset,
processing_class=tokenizer,
embedding_func=embedding_func,
    embedding_tokenizer=embedding_tokenizer,
)
bco_trainer.train()
```
### For Mixture of Experts Models: Enabling the auxiliary loss
MoEs are most efficient when the load is distributed roughly equally across the experts.
To keep MoE training balanced during preference-tuning, it is beneficial to add the load balancer's auxiliary loss to the final loss.
This option is enabled by setting `output_router_logits=True` in the model config (e.g. MixtralConfig).
To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: 0.001).
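As a minimal sketch (assuming a Mixtral-style checkpoint; `output_router_logits` and `router_aux_loss_coef` are fields of `MixtralConfig`):

```py
from transformers import AutoModelForCausalLM

# Sketch: enable the load-balancing auxiliary loss for an MoE model
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-v0.1",  # illustrative MoE checkpoint
    output_router_logits=True,      # expose router logits so the aux loss is added to the total loss
    router_aux_loss_coef=0.001,     # weight of the auxiliary loss (default)
)
```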
## BCOTrainer
[[autodoc]] BCOTrainer
- train
- save_model
- push_to_hub
## BCOConfig
[[autodoc]] BCOConfig
| trl/docs/source/bco_trainer.md/0 | {
"file_path": "trl/docs/source/bco_trainer.md",
"repo_id": "trl",
"token_count": 1246
} | 589 |
# Paper Index
<Tip warning={true}>
Section under construction. Feel free to contribute!
</Tip>
## Group Sequence Policy Optimization
**📜 Paper**: https://huggingface.co/papers/2507.18071
GSPO is a GRPO variant that computes importance sampling weights at the sequence level instead of per-token. To reproduce the paper's setting, use this configuration:
```python
from trl import GRPOConfig
training_args = GRPOConfig(
importance_sampling_level="sequence",
loss_type="grpo",
beta=0.0, # GSPO set kl regularization to zero: https://github.com/volcengine/verl/pull/2775#issuecomment-3131807306
epsilon=3e-4, # GSPO paper (v2), section 5.1
epsilon_high=4e-4, # GSPO paper (v2), section 5.1
gradient_accumulation_steps=1,
steps_per_generation=4, # partition rollout batch into 4 mini-batches. GSPO paper (v2), section 5.1. Must be 4 times gradient_accumulation_steps
)
```
## DAPO: An Open-Source LLM Reinforcement Learning System at Scale
**📜 Paper**: https://huggingface.co/papers/2503.14476
The DAPO algorithm includes 5 key components:
- Overlong Filtering
- Clip-Higher
- Soft Overlong Punishment
- Token-level Loss
- Dynamic Sampling (⚠️ Not supported in TRL)
To reproduce the paper's setting, use this configuration:
```python
from trl import GRPOConfig, GRPOTrainer
training_args = GRPOConfig(
# Overlong Filtering
mask_truncated_completions=True,
# Token-level Loss
loss_type="dapo",
# Clip-Higher
epsilon_high=0.28, # DAPO paper: section 4.1
epsilon=0.2, # DAPO paper: section 4.1
# Other parameters used
per_device_train_batch_size=512, # mini-batch size for training in the paper, DAPO paper: section 4.1
num_generations=16, # number of sample responses in the paper, DAPO paper: section 4.1
max_completion_length=20480, # maximum number of tokens for generation in the paper, DAPO paper: section 4.1
beta=0.0 # section 2.3, DAPO paper
)
# Soft Overlong Punishment
sop_reward = get_soft_overlong_punishment(max_completion_len=20480, soft_punish_cache=4096) # DAPO paper: section 4.1
trainer = GRPOTrainer(
...,
args=training_args,
reward_funcs=[..., sop_reward],
)
```
## Dr. GRPO: Understanding R1-Zero-Like Training: A Critical Perspective
**📜 Paper**: https://huggingface.co/papers/2503.20783
A study of R1-Zero training identifies pretraining effects on RL performance and proposes Dr. GRPO to improve token efficiency, achieving superior accuracy on AIME 2024. To reproduce the paper's setting, use this configuration:
```python
from trl import GRPOConfig
training_args = GRPOConfig(
loss_type="dr_grpo",
per_device_train_batch_size=1, # train_batch_size_per_device in the Training section of the repository
num_generations=8, # num_samples in the Training section of the repository
max_prompt_length=1024, # prompt_max_length in the Training section of the repository
max_completion_length=3000, # generate_max_length in the Training section of the repository
beta=0.0, # beta in the Training section of the repository
)
```
## Direct Preference Optimization (DPO): Your Language Model is Secretly a Reward Model
**📜 Paper**: https://huggingface.co/papers/2305.18290
Direct Preference Optimization (DPO) fine-tunes language models more efficiently and with better performance than reinforcement learning from human feedback (RLHF) by directly optimizing the policy on human preference data. To reproduce the paper's setting, use this configuration:
```python
from trl import DPOConfig
training_args = DPOConfig(
loss_type="sigmoid", # losses in Appendix B of the paper
per_device_train_batch_size=64, # batch size in Appendix B of the paper
learning_rate=1e-6, # learning rate in Appendix B of the paper
beta=0.1, # beta in Appendix B of the paper
)
```
## AlphaPO -- Reward shape matters for LLM alignment
**📜 Paper**: https://huggingface.co/papers/2501.03884
AlphaPO is a new Direct Alignment Algorithm (DAA) that leverages an alpha parameter to change the shape of the reward function beyond the standard log reward, maintaining fine-grained control over likelihood displacement and over-optimization. To reproduce the paper's setting, use this configuration:
```python
from trl import CPOConfig
# Mistral-Instruct from Table 3 of the paper
training_args = CPOConfig(
loss_type="alphapo",
alpha=0.25,
beta=2.5,
simpo_gamma=0.1,
learning_rate=7e-7,
...
)
```
## EMA Without the Lag: Bias-Corrected Iterate Averaging Schemes
**📜 Paper**: https://huggingface.co/papers/2508.00180
Bias-Corrected Exponential Moving Average (BEMA) improves the stability and efficiency of language model fine-tuning by reducing stochasticity and eliminating bias. To use BEMA with SFT as described in the paper, you can use the [`BEMACallback`]:
```python
from trl import BEMACallback, SFTTrainer
trainer = SFTTrainer(
...
callbacks=[BEMACallback()],
)
```
## Part I: Tricks or Traps? A Deep Dive into RL for LLM Reasoning (Lite PPO)
**📜 Paper**: https://huggingface.co/papers/2508.08221
The authors of this paper find that the combination of:
1. scaling rewards by the standard deviation computed over the entire batch and
2. aggregating loss over the total number of tokens
can unlock the learning capability of critic-free policies using vanilla PPO loss. Their results demonstrate that this simple combination consistently improves performance, surpassing strategies like GRPO and [DAPO](https://huggingface.co/papers/2503.14476).
TRL supports applying these findings when training a GRPO model:
```python
from trl import GRPOConfig
training_args = GRPOConfig(
...
scale_rewards="group",
loss_type="bnpo",
# Other parameters used
beta=0.0, # = init_kl_coef in the paper
top_p=0.99,
top_k=100,
temperature=0.99,
    num_generations=8, # = num_return_sequences in the paper
    num_iterations=1, # = ppo_epochs in the paper
    per_device_train_batch_size=4,
gradient_accumulation_steps=32,
steps_per_generation=8, # (rollout_batch_size*num_return_sequences) / (per_device_train_batch_size*gradient_accumulation_steps)
)
```
Note that when using gradient accumulation, the loss is aggregated over the total number of tokens in the batch, but not over the accumulated batch. For more details, see the [GRPO Trainer - Loss types](grpo_trainer#loss_types).
| trl/docs/source/paper_index.md/0 | {
"file_path": "trl/docs/source/paper_index.md",
"repo_id": "trl",
"token_count": 2109
} | 590 |
# vLLM Integration
This document will guide you through the process of using vLLM with TRL for faster generation in online methods like GRPO and Online DPO. We first summarize a tl;dr on how to use vLLM with TRL, and then we will go into the details of how it works under the hood. Let's go! 🔥
## 🚀 How can I use vLLM with TRL to speed up training?
💡 **Note**: Resources required for this specific example: a single node with 8 GPUs.
<Tip warning={true}>
vLLM server and TRL trainer must use different CUDA devices to avoid conflicts.
</Tip>
First, install vLLM using the following command:
```bash
pip install "trl[vllm]"
```
Then run the server on specific GPUs (e.g., GPUs 0-3):
```sh
CUDA_VISIBLE_DEVICES=0,1,2,3 trl vllm-serve --model Qwen/Qwen2.5-7B --tensor-parallel-size 2 --data-parallel-size 2
```
Once the server is running, you can use it to generate completions for training. In the example below, we are using the `GRPOTrainer` to train a model using the vLLM server for generation. The `--tensor-parallel-size` and `--data-parallel-size` arguments control how the model and data are sharded across GPUs.
In this example, we are sharding two copies of the model across 4 GPUs. Increasing data parallelism increases throughput, while increasing tensor parallelism allows for serving larger models. Then, run the training script on different GPUs (e.g., GPUs 4-7) by passing `use_vllm=True` in the training arguments as follows:
Sample of a simple `train.py` script:
```python
from datasets import load_dataset
from trl import GRPOTrainer, GRPOConfig
dataset = load_dataset("trl-lib/tldr", split="train")
# Dummy reward function: count the number of unique characters in the completions
def reward_num_unique_chars(completions, **kwargs):
return [len(set(c)) for c in completions]
training_args = GRPOConfig(
output_dir="my_test",
use_vllm=True,
bf16=True,
gradient_checkpointing=True,
)
trainer = GRPOTrainer(
model="Qwen/Qwen2.5-7B",
args=training_args,
reward_funcs=reward_num_unique_chars,
train_dataset=dataset,
)
trainer.train()
```
And the train command on separate GPUs from the server:
```sh
CUDA_VISIBLE_DEVICES=4,5,6,7 accelerate launch train.py
```
## 🎬 Flashback: Why do we need to use vLLM in online methods?
Online methods like GRPO or Online DPO require the model to generate completions during training, which are then used to compute reward signals. However, generation can be extremely time-consuming, especially with large or reasoning models. In the default setup (without vLLM), completions are generated using the [(unwrapped) model's `generate` method](https://github.com/huggingface/trl/blob/f3e8c2304428ef16e9ae5de9e5741ed84d533b7b/trl/trainer/grpo_trainer.py#L965C39-L965C66). This approach quickly becomes a major bottleneck — generation is slow and inefficient, particularly for large batches or models. As a result, training times increase significantly, and overall efficiency drops. To address this, we turn to vLLM, which enables much faster and more scalable generation, helping eliminate this bottleneck in online methods.
## 🤔 How does vLLM solve the slow generation issue?
If you've ever done autoregressive decoder training, you know all the input tokens to the LLM produce their attention key and value tensors, and these tensors are kept in GPU memory to later generate subsequent tokens based on them. These cached key and value tensors are often referred to as the KV cache. However, storing the KV cache occupies a lot of memory, so vLLM uses a technique called **PagedAttention** to solve this problem. PagedAttention, which is inspired by the OS’s virtual memory concept, stores continuous keys and values in **non-contiguous memory space**, which is much more efficient. The details of this are beyond the scope of this document, but in short, it allows the model to store the keys and values in a more efficient way, reducing the memory footprint and speeding up the generation process. If you are interested, make sure to check out the [vLLM PagedAttention](https://blog.vllm.ai/2023/06/20/vllm.html) for more details.
## 🤔 What exactly happens when you run `trl vllm-serve --model <model_name>`?
When you run for example
```sh
CUDA_VISIBLE_DEVICES=0,1,2,3 trl vllm-serve --model Qwen/Qwen2.5-7B --tensor-parallel-size 1 --data-parallel-size 4
```
the following happens:

1. vLLM first spawns multiple workers to handle incoming requests in parallel. The number of workers is determined by multiplying the `--tensor-parallel-size` and `--data-parallel-size` values. In this example, it spawns 4 workers (1 × 4).
Each worker operates independently and processes a chunk of the incoming requests — which are basically the prompts sent to the server for generation. A key point to understand is that these 4 workers are running in parallel, and each one is responsible for handling a subset of the total incoming load.
2. Once the incoming requests (prompts) are distributed across the workers, the model starts generating completions. Internally, the model’s weights are split across multiple GPUs based on the `--tensor-parallel-size` argument — this is how tensor parallelism is handled. Meanwhile, data parallelism (controlled by `--data-parallel-size`) ensures that different sets of requests are processed independently across the workers. In short: tensor parallelism splits the model across GPUs, and data parallelism splits the batch of requests across different model replicas.
3. Although the GPUs process requests independently and in parallel, they still need to communicate with each other. Remember that each GPU handles only a slice of the incoming prompts (for example, with 4 GPUs and 8 prompts using `--data-parallel-size=4`, each GPU processes 2 prompts).
This GPU-to-GPU communication is managed efficiently by NVIDIA’s NCCL library. The communication mainly ensures that each GPU gets its correct portion of the incoming requests — it’s lightweight and doesn’t interfere with generation itself.
Separately, the number of completions to generate per prompt is controlled by the `num_generations` setting in the GRPO config. For instance, if you set `num_generations=2` (like in the picture above), each prompt will have 2 completions. So, with 8 prompts and `num_generations=2`, you would end up with 16 completions total — regardless of the number of GPUs or parallelism settings.
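As a quick sanity check (a toy calculation, not TRL code):

```python
num_prompts = 8          # prompts sent to the server
data_parallel_size = 4   # number of vLLM workers
num_generations = 2      # completions per prompt, from the GRPO config

prompts_per_worker = num_prompts // data_parallel_size  # 2 prompts per worker
total_completions = num_prompts * num_generations       # 16, independent of parallelism
```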
## 🥸 More detail on what happens under the hood when running the server
* The vLLM server starts by running the command: `trl vllm-serve --model Qwen/Qwen2.5-7B`.
* Once the server is running, it generates completions based on requests from the client (trainer) using `vllm_client.generate` [here](https://github.com/huggingface/trl/blob/cc044e35b285be7dc062764b3364e1e684db4c7c/trl/trainer/grpo_trainer.py#L1025-L1035).
* The client (trainer) then requests these completions from the server.
* These completions are used to compute the reward signal.
* Based on the reward signal and the model’s output, the loss is computed, and the backward pass is performed to update the model’s weights.
* **Note**: The server only handles completion generation — it doesn’t train the model. Therefore, the model’s weights aren’t updated on the server. Once the backward pass is complete, the client sends the updated weights to the server using `vllm_client.update_named_param(name, param.data)`.
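Conceptually, the weight-sync step from the last bullet looks like this (a sketch, not the exact trainer code):

```python
# After the optimizer step, push updated weights from the trainer to the vLLM server
for name, param in model.named_parameters():
    vllm_client.update_named_param(name, param.data)
```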
When using vLLM, ensure that the GPUs assigned for training and generation are separate to avoid NCCL communication conflicts. If you do not set the `CUDA_VISIBLE_DEVICES` environment variable, the training script will use all available GPUs by default, which may lead to device conflicts. Starting with the first TRL release after v0.19.1, the code automatically detects and prevents same-device usage, raising an error in the vLLM server process:
```
RuntimeError: Attempting to use the same CUDA device for multiple distinct roles/ranks within the same communicator.
Ensure that trainer is using different devices than vLLM server.
```
For example, if you want to use GPUs 4–7 for training while the server runs on GPUs 0-3, set:
```sh
CUDA_VISIBLE_DEVICES=4,5,6,7 accelerate launch train.py
```
## 🍷 More customization options with vLLM?
You can customize the server configuration by passing additional arguments.
```
$ trl vllm-serve --help
usage: trl vllm-serve [-h] --model MODEL [--revision REVISION] [--tensor_parallel_size TENSOR_PARALLEL_SIZE]
[--data_parallel_size DATA_PARALLEL_SIZE] [--host HOST] [--port PORT]
[--gpu_memory_utilization GPU_MEMORY_UTILIZATION] [--dtype DTYPE] [--max_model_len MAX_MODEL_LEN]
[--enable_prefix_caching ENABLE_PREFIX_CACHING] [--enforce_eager ENFORCE_EAGER] [--log_level LOG_LEVEL]
options:
-h, --help Show this help message and exit
--model MODEL Model name or path to load the model from. (default: None)
--revision REVISION Revision to use for the model. If not specified, the default branch will be used. (default: None)
--tensor_parallel_size TENSOR_PARALLEL_SIZE, --tensor-parallel-size TENSOR_PARALLEL_SIZE
Number of tensor parallel workers to use. (default: 1)
--data_parallel_size DATA_PARALLEL_SIZE, --data-parallel-size DATA_PARALLEL_SIZE
Number of data parallel workers to use. (default: 1)
--host HOST Host address to run the server on. (default: 0.0.0.0)
--port PORT Port to run the server on. (default: 8000)
--gpu_memory_utilization GPU_MEMORY_UTILIZATION, --gpu-memory-utilization GPU_MEMORY_UTILIZATION
Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the device
dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus improve the
model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors during
initialization. (default: 0.9)
--dtype DTYPE Data type to use for vLLM generation. If set to 'auto', the data type will be automatically determined based on
the model configuration. Find the supported values in the vLLM documentation. (default: auto)
--max_model_len MAX_MODEL_LEN, --max-model-len MAX_MODEL_LEN
If set, the `max_model_len` to use for vLLM. This can be useful when running with reduced
`vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model context
size, which might be much larger than the KV cache, leading to inefficiencies. (default: None)
--enable_prefix_caching ENABLE_PREFIX_CACHING, --enable-prefix-caching ENABLE_PREFIX_CACHING
Whether to enable prefix caching in vLLM. If set to `True`, ensure that the model and the hardware support this
feature. (default: None)
--enforce_eager ENFORCE_EAGER, --enforce-eager ENFORCE_EAGER
Whether to enforce eager execution. If set to `True`, we will disable CUDA graph and always execute the model
in eager mode. If `False` (default behavior), we will use CUDA graph and eager execution in hybrid. (default:
None)
--log_level LOG_LEVEL, --log-level LOG_LEVEL
Log level for uvicorn. Possible choices: 'critical', 'error', 'warning', 'info', 'debug', 'trace'. (default:
info)
```
## 🥳 Okay, now that we have the server running, how can we use it to generate completions?
Run the training script and pass `use_vllm=True` in the training arguments:
```python
from trl import GRPOConfig
training_args = GRPOConfig(..., use_vllm=True)
```
## 💆🏻♀️ What's the best distributed setup?


First and foremost, always remember that the optimal setup depends on:
* The model size
* The number of GPUs you have
* The GPU memory size
* The batch size you are using
* The number of requests you are sending to the server (prompts)
* The `max_model_len` you are using (this is the max length of the input sequence that the model can process, a.k.a. the context window size)
* The number of completions you are generating for each request (`num_generations`)
Given these factors, our experiments on the Qwen model family (3B, 7B, 14B, 32B) using 8 H100 GPUs show that:
* For reasonable-sized models (3B–14B) and a moderate context window (`max_len < 8k`), using full capacity for data parallelism gives better throughput. The setup `(tp=1, dp=8)` yields the best results.
* For larger models (32B) and longer context windows (`max_len > 8k`), a smaller DP size combined with some model-side parallelism performs better. For example, `(tp=2, dp=4)` is a good setup for 32B models with a larger context window.
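For example, assuming 8 GPUs are dedicated to generation, the two setups above could be launched as follows (model names are illustrative):

```sh
# Moderate model size, moderate context: maximize data parallelism
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 trl vllm-serve --model Qwen/Qwen2.5-7B --tensor-parallel-size 1 --data-parallel-size 8

# Larger model, longer context: trade some data parallelism for tensor parallelism
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 trl vllm-serve --model Qwen/Qwen2.5-32B --tensor-parallel-size 2 --data-parallel-size 4
```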
## vLLM with Transformers Backend
vLLM now supports the transformers backend for model implementations. Simply pass `transformers` as the `vllm_model_impl` setting in the configuration, or via the argument parser, to use the transformers backend. This works for both LLMs and VLMs. See the example below; more information is available [here](https://blog.vllm.ai/2025/04/11/transformers-backend.html).
```
CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=0 trl vllm-serve --model Qwen/Qwen2.5-VL-3B-Instruct --tensor-parallel-size 1 --port 8000 --enforce_eager --vllm_model_impl transformers
```
| trl/docs/source/vllm_integration.md/0 | {
"file_path": "trl/docs/source/vllm_integration.md",
"repo_id": "trl",
"token_count": 4407
} | 591 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
from huggingface_hub import ModelCard
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
r"""
Arguments for the script.
Args:
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the dataset to the Hugging Face Hub.
repo_id (`str`, *optional*, defaults to `"trl-lib/prm800k"`):
Hugging Face repository ID to push the dataset to.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of workers to use for dataset processing.
"""
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the dataset to the Hugging Face Hub."},
)
repo_id: str = field(
default="trl-lib/prm800k",
metadata={"help": "Hugging Face repository ID to push the dataset to."},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of workers to use for dataset processing."},
)
def process_example(example):
outputs = []
prompt = example["question"]["problem"]
# Iterate through each step
previous_completions = []
previous_labels = []
for step in example["label"]["steps"]:
if step["completions"] is None and step["human_completion"] is None and step["chosen_completion"] is None:
# happens sometimes
break
# Loop through completions
for completion_idx, completion in enumerate(step["completions"]):
            # For every completion that is not chosen, we reach a terminal state, so we can add it to the list of outputs.
if completion_idx != step["chosen_completion"]:
content = completion["text"]
completions = previous_completions[:] + [content]
label = completion["rating"] == 1
labels = previous_labels[:] + [label]
outputs.append({"prompt": prompt, "completions": completions, "labels": labels})
        # Now, expand the previous completions and labels
if step["chosen_completion"] is not None:
chosen_completion = step["completions"][step["chosen_completion"]]
label = chosen_completion["rating"] == 1
elif step["human_completion"] is not None:
chosen_completion = step["human_completion"]
label = True
else:
break
content = chosen_completion["text"]
previous_completions.append(content)
previous_labels.append(label)
# Last step: we are in a terminal state, so we can add it to the list of outputs
outputs.append({"prompt": prompt, "completions": previous_completions, "labels": previous_labels})
return outputs
def process_batch(examples):
outputs = []
batch_size = len(examples["label"])
for idx in range(batch_size):
example = {k: v[idx] for k, v in examples.items()}
outputs.extend(process_example(example))
# list of dict to dict of list
outputs = {k: [v[k] for v in outputs] for k in outputs[0]}
return outputs
model_card = ModelCard("""
---
tags: [trl]
---
# PRM800K Dataset
## Summary
The PRM800K dataset is a processed version of [OpenAI's PRM800K](https://github.com/openai/prm800k), designed to train models using the [TRL library](https://github.com/huggingface/trl) for stepwise supervision tasks. It contains 800,000 step-level correctness labels for model-generated solutions to problems from the MATH dataset. This dataset enables models to learn and verify each step of a solution, enhancing their reasoning capabilities.
## Data Structure
- **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard)
- **Type**: [Stepwise supervision](https://huggingface.co/docs/trl/main/dataset_formats#stepwise-supervision)
Columns:
- `"prompt"`: The problem statement.
- `"completions"`: A list of reasoning steps generated to solve the problem.
- `"labels"`: A list of booleans or floats indicating the correctness of each corresponding reasoning step.
This structure allows models to learn the correctness of each step in a solution, facilitating improved reasoning and problem-solving abilities.
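For example, a single record may look like this (illustrative, not taken from the dataset):

```python
{
    "prompt": "What is 1 + 1?",
    "completions": ["Add the two numbers together.", "1 + 1 = 2."],
    "labels": [True, True],
}
```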
## Generation script
The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/prm800k.py).
""")
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
data_files = {
"train": "https://github.com/openai/prm800k/raw/refs/heads/main/prm800k/data/phase1_train.jsonl",
"test": "https://github.com/openai/prm800k/raw/refs/heads/main/prm800k/data/phase1_test.jsonl",
}
dataset = load_dataset("json", data_files=data_files)
dataset = dataset.map(
process_batch,
batched=True,
batch_size=10,
remove_columns=[
"labeler",
"timestamp",
"generation",
"is_quality_control_question",
"is_initial_screening_question",
"question",
"label",
],
num_proc=script_args.dataset_num_proc,
)
if script_args.push_to_hub:
dataset.push_to_hub(script_args.repo_id)
model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
| trl/examples/datasets/prm800k.py/0 | {
"file_path": "trl/examples/datasets/prm800k.py",
"repo_id": "trl",
"token_count": 2257
} | 592 |
# RLHF pipeline for the creation of StackLLaMa: a Stack exchange llama-7b model.
There were three main steps to the training process:
1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se:
- `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path=<LLAMA_MODEL_PATH> --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se`
2. Reward modeling using dialog pairs from the SE dataset using the llama-7b-se to create llama-7b-se-rm:
- `torchrun --nnodes 1 --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=<LLAMA_SE_MODEL>`
3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model:
- `accelerate launch --multi_gpu --num_machines 1 --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name=<LLAMA_SE_MODEL> --reward_model_name=<LLAMA_SE_RM_MODEL> --adafactor=False --tokenizer_name=<LLAMA_TOKENIZER> --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam`
LoRA layers were used at all stages to reduce memory requirements.
At each stage, the PEFT adapter layers were merged with the base model using:
```shell
python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ
```
Note that this script requires `peft>=0.3.0`.
For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform).
| trl/examples/research_projects/stack_llama/scripts/README.md/0 | {
"file_path": "trl/examples/research_projects/stack_llama/scripts/README.md",
"repo_id": "trl",
"token_count": 696
} | 593 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "trl @ git+https://github.com/huggingface/trl.git",
# ]
# ///
import shutil
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoModelForSequenceClassification,
AutoTokenizer,
HfArgumentParser,
)
from trl import ModelConfig, RLOOConfig, RLOOTrainer, ScriptArguments
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
"""
python -i examples/scripts/rloo/rloo.py \
--dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
--dataset_train_split descriptiveness \
--learning_rate 3e-6 \
--num_ppo_epochs 1 \
--num_mini_batches 1 \
--output_dir models/minimal/ppo \
--per_device_train_batch_size 64 \
--gradient_accumulation_steps 1 \
--total_episodes 10000 \
--model_name_or_path EleutherAI/pythia-1b-deduped \
--missing_eos_penalty 1.0
accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \
examples/scripts/rloo/rloo.py \
--dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
--dataset_train_split descriptiveness \
--output_dir models/minimal/rloo \
--rloo_k 2 \
--num_ppo_epochs 1 \
--num_mini_batches 1 \
--learning_rate 3e-6 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 16 \
--total_episodes 10000 \
--model_name_or_path EleutherAI/pythia-1b-deduped \
--sft_model_path EleutherAI/pythia-1b-deduped \
--reward_model_path EleutherAI/pythia-1b-deduped \
--local_rollout_forward_batch_size 1 \
--missing_eos_penalty 1.0
"""
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, RLOOConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_into_dataclasses()
# remove output_dir if exists
shutil.rmtree(training_args.output_dir, ignore_errors=True)
################
# Model & Tokenizer
################
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code
)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
if tokenizer.chat_template is None:
tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
reward_model = AutoModelForSequenceClassification.from_pretrained(
training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
)
ref_policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
)
policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
)
################
# Dataset
################
dataset = load_dataset(
script_args.dataset_name, name=script_args.dataset_config, split=script_args.dataset_train_split
)
eval_samples = 100
train_dataset = dataset.select(range(len(dataset) - eval_samples))
eval_dataset = dataset.select(range(len(dataset) - eval_samples, len(dataset)))
dataset_text_field = "prompt"
def prepare_dataset(dataset, tokenizer):
"""pre-tokenize the dataset before training; only collate during training"""
def tokenize(element):
outputs = tokenizer(
element[dataset_text_field],
padding=False,
)
return {"input_ids": outputs["input_ids"]}
return dataset.map(
tokenize,
batched=True,
remove_columns=dataset.column_names,
num_proc=training_args.dataset_num_proc,
)
# Compute that only on the main process for faster data processing.
# see: https://github.com/huggingface/trl/pull/1255
with PartialState().local_main_process_first():
train_dataset = prepare_dataset(train_dataset, tokenizer)
eval_dataset = prepare_dataset(eval_dataset, tokenizer)
################
# Training
################
trainer = RLOOTrainer(
config=training_args,
processing_class=tokenizer,
policy=policy,
ref_policy=ref_policy,
reward_model=reward_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
trainer.generate_completions()
| trl/examples/scripts/rloo/rloo.py/0 | {
"file_path": "trl/examples/scripts/rloo/rloo.py",
"repo_id": "trl",
"token_count": 2052
} | 594 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoTokenizer, GenerationConfig
from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
from trl.extras import BestOfNSampler
from .testing_utils import TrlTestCase
def queries_to_scores(list_of_strings):
return [torch.rand(1).item() for _ in list_of_strings]
class BestOfNSamplerTester(TrlTestCase):
"""
Tests the BestOfNSampler class
"""
ref_model_name = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
output_length_sampler = LengthSampler(2, 6)
model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token
def test_different_input_types(self):
r"""
Tests if the different input types normalizer works
"""
generation_config = GenerationConfig(
min_length=-1,
top_k=0.0,
top_p=1.0,
do_sample=True,
pad_token_id=self.tokenizer.eos_token_id,
)
output_length_sampler = LengthSampler(2, 6)
best_of_n = BestOfNSampler(
self.model,
self.tokenizer,
queries_to_scores,
length_sampler=output_length_sampler,
generation_config=generation_config,
)
queries = ["hello world", "goodbye world"]
tokenized_queries = [self.tokenizer.encode(query) for query in queries]
various_queries_formats = [
(tokenized_queries[0], 1),
(tokenized_queries, 2),
(torch.tensor(tokenized_queries[1]), 1),
([torch.tensor(query) for query in tokenized_queries], 2),
]
for q, expected_length in various_queries_formats:
results = best_of_n.generate(q)
self.assertIsInstance(results, list)
self.assertEqual(len(results), expected_length)
def test_different_sample_sizes_and_n_candidates_values(self):
r"""
Tests different sample sizes and n_candidates values
"""
generation_config = GenerationConfig(
min_length=-1,
top_k=0.0,
top_p=1.0,
do_sample=True,
pad_token_id=self.tokenizer.eos_token_id,
)
output_length_sampler = LengthSampler(6, 10)
for sample_value, n_candidates_values, expected in [
(4, 2, 2),
(10, 3, 3),
(6, 4, 4),
]:
best_of_n = BestOfNSampler(
self.model,
self.tokenizer,
queries_to_scores,
length_sampler=output_length_sampler,
generation_config=generation_config,
sample_size=sample_value,
n_candidates=n_candidates_values,
)
queries = ["hello world", "troll the world"]
tokenized_queries = [self.tokenizer.encode(query) for query in queries]
results = best_of_n.generate(tokenized_queries)
for result in results:
self.assertEqual(len(result), expected)
| trl/tests/test_best_of_n_sampler.py/0 | {
"file_path": "trl/tests/test_best_of_n_sampler.py",
"repo_id": "trl",
"token_count": 1685
} | 595 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoModelForCausalLM, GenerationConfig
from trl.models.modeling_base import GeometricMixtureWrapper, create_reference_model
from .testing_utils import TrlTestCase
class TestGeometricMixtureWrapper(TrlTestCase):
def setUp(self):
super().setUp()
model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForCausalLM.from_pretrained(model_id)
self.ref_model = create_reference_model(self.model)
self.generation_config = GenerationConfig.from_pretrained(model_id)
self.mixture_coef = 0.5
self.wrapper = GeometricMixtureWrapper(
self.model, self.ref_model, self.generation_config, mixture_coef=self.mixture_coef
)
def test_forward(self):
input_ids = torch.tensor([[1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
output = self.wrapper(input_ids=input_ids, attention_mask=attention_mask)
self.assertIsNotNone(output)
self.assertTrue(hasattr(output, "logits"))
self.assertEqual(output.logits.shape, (1, 5, self.model.config.vocab_size))
def test_mixture_coefficient(self):
input_ids = torch.tensor([[1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
model_output = self.model(input_ids=input_ids, attention_mask=attention_mask)
ref_model_output = self.ref_model(input_ids=input_ids, attention_mask=attention_mask)
wrapper_output = self.wrapper(input_ids=input_ids, attention_mask=attention_mask)
expected_logits = torch.nn.functional.log_softmax(
self.mixture_coef * ref_model_output.logits + (1 - self.mixture_coef) * model_output.logits, dim=-1
)
self.assertTrue(torch.allclose(wrapper_output.logits, expected_logits, atol=1e-5))
def test_prepare_inputs_for_generation(self):
input_ids = torch.tensor([[1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
inputs = self.wrapper.prepare_inputs_for_generation(input_ids, attention_mask=attention_mask, use_cache=True)
self.assertIn("input_ids", inputs)
self.assertIn("attention_mask", inputs)
self.assertFalse(inputs.get("use_cache", False))
| trl/tests/test_modeling_geometric_mixture_wrapper.py/0 | {
"file_path": "trl/tests/test_modeling_geometric_mixture_wrapper.py",
"repo_id": "trl",
"token_count": 1115
} | 596 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer
from transformers.testing_utils import require_peft
from transformers.utils import is_peft_available
from trl import XPOConfig, XPOTrainer
from .testing_utils import RandomPairwiseJudge, TrlTestCase, require_llm_blender
if is_peft_available():
from peft import LoraConfig, get_peft_model
class TestXPOTrainer(TrlTestCase):
def setUp(self):
super().setUp()
self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.reward_model = AutoModelForSequenceClassification.from_pretrained(self.model_id, num_labels=1)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.tokenizer.pad_token = self.tokenizer.eos_token
@parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)])
def test_xpo_trainer_training(self, config_name):
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
trainer = XPOTrainer(
model=self.model,
ref_model=self.ref_model,
reward_model=self.reward_model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
trainer.train()
# Check if training loss is available
self.assertIn("train_loss", trainer.state.log_history[-1])
@require_peft
def test_training_with_peft(self):
lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM")
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
learning_rate=5.0e-7,
eval_strategy="steps",
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")
trainer = XPOTrainer(
model=self.model,
reward_model=self.reward_model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
trainer.train()
# Check if training loss is available
self.assertIn("train_loss", trainer.state.log_history[-1])
@require_peft
def test_training_with_peft_and_ref_model(self):
lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM")
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
learning_rate=5.0e-7,
eval_strategy="steps",
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")
trainer = XPOTrainer(
model=self.model,
ref_model=self.ref_model,
reward_model=self.reward_model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
trainer.train()
# Check if training loss is available
self.assertIn("train_loss", trainer.state.log_history[-1])
@require_peft
def test_training_with_peft_model_and_peft_config(self):
model_lora_config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM")
model = get_peft_model(self.model, model_lora_config)
# we want only the "train adapter" to be trained
lora_train_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM")
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
learning_rate=5.0e-7,
eval_strategy="steps",
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")
trainer = XPOTrainer(
model=model,
reward_model=self.reward_model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_train_config,
)
trainer.train()
# Check if training loss is available
self.assertIn("train_loss", trainer.state.log_history[-1])
@require_peft
def test_training_pre_pefted_model_implicit_ref(self):
lora_config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM")
peft_model_instance = get_peft_model(self.model, lora_config)
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=1,
max_steps=2,
learning_rate=5.0e-7,
eval_strategy="no",
report_to="none",
remove_unused_columns=False,
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")["train"]
trainer = XPOTrainer(
model=peft_model_instance,
ref_model=None,
reward_model=self.reward_model, # Using reward_model to ensure _generate_completions is used as expected
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset,
)
trainer.train()
self.assertIn("train_loss", trainer.state.log_history[-1])
@require_llm_blender
@parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)])
def test_xpo_trainer_judge_training(self, config_name):
training_args = XPOConfig(
output_dir=self.tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
judge = RandomPairwiseJudge()
trainer = XPOTrainer(
model=self.model,
ref_model=self.ref_model,
judge=judge,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
trainer.train()
# Check if training loss is available
self.assertIn("train_loss", trainer.state.log_history[-1])
| trl/tests/test_xpo_trainer.py/0 | {
"file_path": "trl/tests/test_xpo_trainer.py",
"repo_id": "trl",
"token_count": 3738
} | 597 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Callable, Literal, Optional
import datasets
from datasets import Dataset, Value
from packaging import version
from transformers import AutoTokenizer
if version.parse(datasets.__version__) >= version.parse("4.0.0"):
from datasets import List
FORMAT_MAPPING = {
"chatml": List({"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}),
"instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)},
}
else:
FORMAT_MAPPING = {
"chatml": [{"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}],
"instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)},
}
def conversations_formatting_function(
tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"], tools: Optional[list] = None
):
r"""
    Return a callable that takes a "messages"-style dataset and returns a formatted dataset by applying the
    tokenizer's chat template, passing along the schema of the functions in the `tools` list.
"""
def format_dataset(examples):
if isinstance(examples[messages_field][0], list):
output_texts = []
for i in range(len(examples[messages_field])):
output_texts.append(
tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False, tools=tools)
)
return output_texts
else:
return tokenizer.apply_chat_template(examples[messages_field], tokenize=False, tools=tools)
return format_dataset
def instructions_formatting_function(tokenizer: AutoTokenizer):
r"""
    Return a callable that takes an "instructions" dataset and returns a formatted dataset by applying the
    tokenizer's chat template.
"""
def format_dataset(examples):
if isinstance(examples["prompt"], list):
output_texts = []
for i in range(len(examples["prompt"])):
converted_sample = [
{"role": "user", "content": examples["prompt"][i]},
{"role": "assistant", "content": examples["completion"][i]},
]
output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False))
return output_texts
else:
converted_sample = [
{"role": "user", "content": examples["prompt"]},
{"role": "assistant", "content": examples["completion"]},
]
return tokenizer.apply_chat_template(converted_sample, tokenize=False)
return format_dataset
def get_formatting_func_from_dataset(
dataset: Dataset, tokenizer: AutoTokenizer, tools: Optional[list] = None
) -> Optional[Callable]:
r"""
Finds the correct formatting function based on the dataset structure. Currently supported datasets are:
- `ChatML` with [{"role": str, "content": str}]
- `instruction` with [{"prompt": str, "completion": str}]
Args:
dataset (Dataset): User dataset
tokenizer (AutoTokenizer): Tokenizer used for formatting
Returns:
Callable: Formatting function if the dataset format is supported else None
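
    Example (illustrative; the dataset and tokenizer names are placeholders):
        ```python
        from datasets import load_dataset
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
        dataset = load_dataset("trl-lib/Capybara", split="train")  # has a "messages" column
        formatting_func = get_formatting_func_from_dataset(dataset, tokenizer)
        ```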
"""
if isinstance(dataset, Dataset):
if "messages" in dataset.features:
if dataset.features["messages"] == FORMAT_MAPPING["chatml"]:
logging.info("Formatting dataset with chatml format")
return conversations_formatting_function(tokenizer, "messages", tools)
if "conversations" in dataset.features:
if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]:
logging.info("Formatting dataset with chatml format")
return conversations_formatting_function(tokenizer, "conversations", tools)
elif dataset.features == FORMAT_MAPPING["instruction"]:
logging.info("Formatting dataset with instruction format")
return instructions_formatting_function(tokenizer)
return None
| trl/trl/extras/dataset_formatting.py/0 | {
"file_path": "trl/trl/extras/dataset_formatting.py",
"repo_id": "trl",
"token_count": 1783
} | 598 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
def get_soft_overlong_punishment(max_completion_len: int, soft_punish_cache: int) -> Callable:
# docstyle-ignore
r"""
    Reward function that penalizes overlong completions without rewarding shorter ones. Reference: Eq. (13) from the
    DAPO paper (https://huggingface.co/papers/2503.14476)
$$
R_{\text{length}}(y) = \begin{cases}
0, & |y| \le L_{\max} - L_{\text{cache}} \\
\dfrac{(L_{\max} - L_{\text{cache}}) - |y|}{L_{\text{cache}}}, & L_{\max} - L_{\text{cache}} < |y| \le L_{\max} \\
-1, & L_{\max} < |y|
\end{cases}
$$
Args:
max_completion_len (`int`):
Maximum length of the completion, \( L_{\max} \).
soft_punish_cache (`int`):
Minimum length of the completion, \( L_{\text{cache}} \). If set to `0`, no minimum length is applied.
Example:
```python
from trl.rewards import get_soft_overlong_punishment
soft_overlong_punishment = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20)
completion_ids = [[1] * 90] # simulating a completion with 90 tokens. 90 is between 80 and 100.
rewards = soft_overlong_punishment(completion_ids)
print(rewards) # [-0.5]
```
"""
def soft_overlong_punishment_reward(completion_ids: list[list[int]], **kwargs) -> list[float]:
"""Reward function that penalizes overlong completions."""
rewards = []
for ids in completion_ids:
completion_length = len(ids)
if completion_length <= max_completion_len - soft_punish_cache:
rewards.append(0.0)
elif max_completion_len - soft_punish_cache < completion_length <= max_completion_len:
rewards.append((max_completion_len - soft_punish_cache - completion_length) / soft_punish_cache)
else:
rewards.append(-1.0)
return rewards
return soft_overlong_punishment_reward
| trl/trl/rewards/other_rewards.py/0 | {
"file_path": "trl/trl/rewards/other_rewards.py",
"repo_id": "trl",
"token_count": 1001
} | 599 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from transformers import TrainingArguments
@dataclass
class CPOConfig(TrainingArguments):
r"""
Configuration class for the [`CPOTrainer`].
This class includes only the parameters that are specific to CPO training. For a full list of training arguments,
please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
differ from those in [`~transformers.TrainingArguments`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
max_length (`int` or `None`, *optional*, defaults to `1024`):
Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
to use the default data collator.
max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
Maximum length of the prompt. This argument is required if you want to use the default data collator.
max_completion_length (`int` or `None`, *optional*, defaults to `None`):
Maximum length of the completion. This argument is required if you want to use the default data collator
and your model is an encoder-decoder.
beta (`float`, *optional*, defaults to `0.1`):
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
the [paper](https://huggingface.co/papers/2310.12036).
label_smoothing (`float`, *optional*, defaults to `0.0`):
Label smoothing factor. This argument is required if you want to use the default data collator.
loss_type (`str`, *optional*, defaults to `"sigmoid"`):
Type of loss to use. Possible values are:
- `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
- `"hinge"`: hinge loss on the normalized likelihood from the
[SLiC](https://huggingface.co/papers/2305.10425) paper.
- `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.
- `"simpo"`: SimPO loss from the [SimPO](https://huggingface.co/papers/2405.14734) paper.
- `"alphapo"`: AlphaPO loss from the [AlphaPO](https://huggingface.co/papers/2501.03884) paper. This
automatically sets `loss_type="simpo"` and `cpo_alpha=0.0`.
disable_dropout (`bool`, *optional*, defaults to `True`):
Whether to disable dropout in the model.
cpo_alpha (`float`, *optional*, defaults to `1.0`):
Weight of the BC regularizer in CPO training.
simpo_gamma (`float`, *optional*, defaults to `0.5`):
Target reward margin for the SimPO loss, used only when the `loss_type="simpo"`.
alpha (`float`, *optional*, defaults to `0.0`):
Alpha parameter that controls reward function shape across all loss types. When alpha=0 (default), uses
standard log probability rewards. When `alpha != 0`, applies AlphaPO transformation: `r = (1 - p^(-alpha))
/ alpha` from the [AlphaPO paper](https://huggingface.co/papers/2501.03884). This parameter works with all
loss types.
label_pad_token_id (`int`, *optional*, defaults to `-100`):
Label pad token id. This argument is required if you want to use the default data collator.
padding_value (`int` or `None`, *optional*, defaults to `None`):
Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
This argument is required if you want to use the default data collator.
generate_during_eval (`bool`, *optional*, defaults to `False`):
If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
you need to specify if the model returned by the callable is an encoder-decoder model.
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
string.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
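    Example (illustrative values only; any [`~transformers.TrainingArguments`] field can also be passed):
    ```python
    >>> from trl import CPOConfig
    >>> training_args = CPOConfig(output_dir="cpo-model", loss_type="simpo", simpo_gamma=0.5)
    ```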
"""
_VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs"]
# Parameters whose default values are overridden from TrainingArguments
learning_rate: float = field(
default=1e-6,
metadata={"help": "The initial learning rate for AdamW."},
)
logging_steps: float = field(
default=10,
metadata={
"help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
"will be interpreted as ratio of total training steps."
},
)
gradient_checkpointing: bool = field(
default=True,
metadata={
"help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
},
)
bf16: Optional[bool] = field(
default=None,
metadata={
"help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
"architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
"`fp16` is not set."
},
)
max_length: Optional[int] = field(
default=1024,
metadata={"help": "Maximum length of the sequences (prompt + completion) in the batch."},
)
max_prompt_length: Optional[int] = field(
default=512,
metadata={
"help": "Maximum length of the prompt. This argument is required if you want to use the default data "
"collator and your model is an encoder-decoder."
},
)
max_completion_length: Optional[int] = field(
default=None,
metadata={
"help": "Maximum length of the completion. This argument is required if you want to use the default data "
"collator and your model is an encoder-decoder."
},
)
beta: float = field(
default=0.1,
metadata={
"help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from "
"the reference model."
},
)
label_smoothing: float = field(
default=0.0,
metadata={"help": "Label smoothing factor."},
)
loss_type: str = field(
default="sigmoid",
metadata={
"help": "Type of loss to use.",
"choices": ["sigmoid", "hinge", "ipo", "simpo", "alphapo"],
},
)
disable_dropout: bool = field(
default=True,
metadata={"help": "Whether to disable dropout in the model."},
)
cpo_alpha: float = field(
default=1.0,
metadata={"help": "Weight of the BC regularizer in CPO training."},
)
simpo_gamma: float = field(
default=0.5,
metadata={"help": "Target reward margin for the SimPO loss, used only when the `loss_type='simpo'`."},
)
alpha: float = field(
default=0.0,
metadata={
"help": "Alpha parameter that controls reward function shape across all loss types. When alpha=0 "
"(default), uses standard log probability rewards. When `alpha != 0`, applies AlphaPO transformation: "
"`r = (1 - p^(-alpha)) / alpha` from the AlphaPO paper. This parameter works with all loss types."
},
)
label_pad_token_id: int = field(
default=-100,
metadata={"help": "Label pad token id."},
)
padding_value: Optional[int] = field(
default=None,
metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
)
truncation_mode: str = field(
default="keep_end",
metadata={
"help": "Truncation mode to use when the prompt is too long.",
"choices": ["keep_end", "keep_start"],
},
)
generate_during_eval: bool = field(
default=False,
metadata={"help": "If `True`, generates and logs completions from the model to W&B during evaluation."},
)
is_encoder_decoder: Optional[bool] = field(
default=None,
metadata={"help": "Whether the model is an encoder-decoder model."},
)
model_init_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model "
"from a string."
},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of processes to use for processing the dataset."},
)
def __post_init__(self):
self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
# Syntactic sugar for AlphaPO: set loss_type to "simpo" and cpo_alpha to 0.0
if self.loss_type == "alphapo":
self.loss_type = "simpo"
self.cpo_alpha = 0.0
super().__post_init__()
| trl/trl/trainer/cpo_config.py/0 | {
"file_path": "trl/trl/trainer/cpo_config.py",
"repo_id": "trl",
"token_count": 4048
} | 600 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from trl.trainer.online_dpo_config import OnlineDPOConfig
@dataclass
class NashMDConfig(OnlineDPOConfig):
r"""
Configuration class for the [`NashMDTrainer`].
    Subclass of [`OnlineDPOConfig`]; it inherits all of its arguments and adds the following:
Parameters:
mixture_coef (`float` or `list[float]`, *optional*, defaults to `0.5`):
Logit mixture coefficient for the model and reference model. If a list of floats is provided then the
mixture coefficient is selected for each new epoch and the last coefficient is used for the rest of the
epochs.
"""
mixture_coef: list[float] = field(
default_factory=lambda: [0.5],
metadata={
"help": "Logit mixture coefficient for the model and reference model. If a list of floats is provided "
"then the mixture coefficient is selected for each new epoch and the last coefficient is used for the "
"rest of the epochs."
},
)
def __post_init__(self):
super().__post_init__()
if hasattr(self.mixture_coef, "__len__") and len(self.mixture_coef) == 1:
self.mixture_coef = self.mixture_coef[0]
| trl/trl/trainer/nash_md_config.py/0 | {
"file_path": "trl/trl/trainer/nash_md_config.py",
"repo_id": "trl",
"token_count": 624
} | 601 |
# Copyright 2020-2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import importlib.resources as pkg_resources
import json
import random
from collections import deque
from dataclasses import dataclass, field
from importlib.metadata import version
from typing import Any, Literal, Optional, Union
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data
from accelerate import Accelerator, PartialState, logging
from accelerate.state import AcceleratorState
from huggingface_hub import ModelCard, ModelCardData
from torch.nn.utils.rnn import pad_sequence
from transformers import (
BitsAndBytesConfig,
EvalPrediction,
GenerationConfig,
PreTrainedTokenizerBase,
TrainerState,
TrainingArguments,
is_comet_available,
)
from transformers.utils import (
ModelOutput,
is_peft_available,
is_rich_available,
is_torch_mlu_available,
is_torch_npu_available,
is_torch_xpu_available,
)
from ..trainer.model_config import ModelConfig
if is_rich_available():
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
if is_comet_available():
import comet_ml
if is_peft_available():
from peft import LoraConfig, PeftConfig
logger = logging.get_logger(__name__)
@dataclass
class DataCollatorForChatML:
"""
    Data collator for ChatML format datasets.
    Tokenizes full conversations and their prompts, pads them to the longest sequence in the batch, and builds labels
    in which every prompt token is masked with `ignore_index`, so that the loss is only computed on completion tokens.
    Args:
        tokenizer (`PreTrainedTokenizerBase`):
            The tokenizer used for encoding the data. Must have a pad token set.
        ignore_index (`int`, *optional*, defaults to `-100`):
            Label value used to mask prompt tokens when computing the loss.
        max_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum sequence length. If `None`, defaults to `min(tokenizer.model_max_length, 1024)`.
        prompt_key (`str`, *optional*, defaults to `"prompt"`):
            Key under which a pre-formatted prompt may be provided in each example.
        messages_key (`str`, *optional*, defaults to `"messages"`):
            Key under which the list of chat messages is provided in each example.
"""
tokenizer: PreTrainedTokenizerBase
ignore_index: int = -100
    max_length: Optional[int] = None
prompt_key: str = "prompt"
messages_key: str = "messages"
def __post_init__(self):
if self.tokenizer.pad_token_id is None:
raise ValueError("The tokenizer does not have a pad token. Please set `pad_token_id` in the tokenizer.")
if self.max_length is None:
# set a sensible default
self.max_length = min(self.tokenizer.model_max_length, 1024)
def __call__(self, examples: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
input_ids = []
attention_mask = []
prompts_input_ids = []
prompt_attention_mask = []
labels = []
for example in examples:
formatted_prompt = example.get(self.prompt_key, None)
if formatted_prompt is None:
prompt = example[self.messages_key][:-1]
formatted_prompt = self.tokenizer.apply_chat_template(
prompt, tokenize=False, add_generation_prompt=True
)
if "input_ids" not in example:
message = example[self.messages_key]
formatted_message = self.tokenizer.apply_chat_template(
message, tokenize=False, add_generation_prompt=False
)
tokenized_message = self.tokenizer(
formatted_message,
truncation=True,
max_length=self.max_length,
padding=False,
return_tensors=None,
add_special_tokens=False,
)
input_ids.append(tokenized_message["input_ids"])
if "attention_mask" in example:
attention_mask.append(tokenized_message["attention_mask"])
else:
attention_mask.append([1] * len(tokenized_message["input_ids"]))
else:
input_ids.append(example["input_ids"])
if "attention_mask" in example:
attention_mask.append(example["attention_mask"])
else:
attention_mask.append([1] * len(example["input_ids"]))
tokenized_prompt = self.tokenizer(
formatted_prompt,
truncation=True,
max_length=len(input_ids[-1]),
padding=False,
return_tensors=None,
add_special_tokens=False,
)
prompts_input_ids.append(tokenized_prompt["input_ids"])
prompt_attention_mask.append(tokenized_prompt["attention_mask"])
            # Build the labels: mask every token except the completion tokens with ignore_index
label = [self.ignore_index] * len(input_ids[-1])
completion_start_idx = len(tokenized_prompt["input_ids"])
label[completion_start_idx:] = input_ids[-1][completion_start_idx:]
labels.append(label)
# convert to list of tensors and pad
input_ids = [torch.tensor(ids, dtype=torch.long) for ids in input_ids]
attention_mask = [torch.tensor(mask, dtype=torch.long) for mask in attention_mask]
labels = [torch.tensor(label, dtype=torch.long) for label in labels]
input_ids = pad(input_ids, padding_side="left", padding_value=self.tokenizer.pad_token_id)
attention_mask = pad(attention_mask, padding_side="left", padding_value=0)
labels = pad(labels, padding_side="left", padding_value=self.ignore_index)
prompts_input_ids = [torch.tensor(ids, dtype=torch.long) for ids in prompts_input_ids]
prompt_attention_mask = [torch.tensor(mask, dtype=torch.long) for mask in prompt_attention_mask]
prompts_input_ids = pad(prompts_input_ids, padding_side="left", padding_value=self.tokenizer.pad_token_id)
prompt_attention_mask = pad(prompt_attention_mask, padding_side="left", padding_value=0)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": labels,
"prompts": prompts_input_ids,
"prompt_attention_mask": prompt_attention_mask,
}
@dataclass
class RewardDataCollatorWithPadding:
r"""
Reward DataCollator class that pads the inputs to the maximum length of the batch.
Args:
tokenizer (`PreTrainedTokenizerBase`):
The tokenizer used for encoding the data.
        padding (`bool`, `str` or `PaddingStrategy`, *optional*, defaults to `True`):
            Padding strategy to pass to the tokenizer.
        pad_to_multiple_of (`int` or `None`, *optional*, defaults to `None`):
            If set, will pad the sequence to a multiple of the provided value.
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The tensor type to use.
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str] = True
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]:
features_chosen = []
features_rejected = []
margin = []
# check if we have a margin. If we do, we need to batch it as well
has_margin = "margin" in features[0]
for feature in features:
# check if the keys are named as expected
if (
"input_ids_chosen" not in feature
or "input_ids_rejected" not in feature
or "attention_mask_chosen" not in feature
or "attention_mask_rejected" not in feature
):
raise ValueError(
"The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`"
)
features_chosen.append(
{
"input_ids": feature["input_ids_chosen"],
"attention_mask": feature["attention_mask_chosen"],
}
)
features_rejected.append(
{
"input_ids": feature["input_ids_rejected"],
"attention_mask": feature["attention_mask_rejected"],
}
)
if has_margin:
margin.append(feature["margin"])
batch_chosen = self.tokenizer.pad(
features_chosen,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch_rejected = self.tokenizer.pad(
features_rejected,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
batch = {
"input_ids_chosen": batch_chosen["input_ids"],
"attention_mask_chosen": batch_chosen["attention_mask"],
"input_ids_rejected": batch_rejected["input_ids"],
"attention_mask_rejected": batch_rejected["attention_mask"],
"return_loss": True,
}
if has_margin:
margin = torch.tensor(margin, dtype=torch.float)
batch["margin"] = margin
return batch
def pad(
tensors: list[torch.Tensor],
padding_value: int = 0,
padding_side: str = "right",
pad_to_multiple_of: Optional[int] = None,
) -> torch.Tensor:
"""
Pads a list of tensors to the same shape along the first dimension.
Args:
tensors (`list[torch.Tensor]`):
List of input tensors to pad.
padding_value (`int`):
Value to use for padding. Default is 0.
padding_side (`str`):
Side on which to add padding. Must be 'left' or 'right'. Default is 'right'.
pad_to_multiple_of (`int`, *optional*, defaults to `None`):
If set will pad the sequence to a multiple of the provided value.
Returns:
`torch.Tensor`:
A single tensor containing the padded tensors.
Examples:
```python
>>> import torch
>>> pad([torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
tensor([[1, 2, 3],
[4, 5, 0]])
>>> pad([torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6]])])
tensor([[[1, 2],
[3, 4]],
[[5, 6],
[0, 0]]])
```
"""
# Determine the maximum shape for each dimension
output_shape = np.max([t.shape for t in tensors], 0).tolist()
# Apply pad_to_multiple_of to the first (sequence) dimension
if pad_to_multiple_of is not None:
remainder = output_shape[0] % pad_to_multiple_of
if remainder != 0:
output_shape[0] += pad_to_multiple_of - remainder
# Create an output tensor filled with the padding value
output = torch.full((len(tensors), *output_shape), padding_value, dtype=tensors[0].dtype, device=tensors[0].device)
for i, t in enumerate(tensors):
if padding_side == "left":
seq_start = output_shape[0] - t.shape[0]
elif padding_side == "right":
seq_start = 0
else:
raise ValueError("padding_side must be 'left' or 'right'")
# Define the slices
seq_slice = slice(seq_start, seq_start + t.shape[0])
slices = (seq_slice,) + tuple(slice(0, s) for s in t.shape[1:])
output[i][slices] = t
return output
@dataclass
class DPODataCollatorWithPadding:
r"""
DPO DataCollator class that pads the tokenized inputs to the maximum length of the batch.
Args:
        pad_token_id (`int`, *optional*, defaults to `0`):
            The tokenizer's pad_token_id.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            The label used for masking.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `False`):
            Whether your model has an encoder-decoder architecture.
"""
pad_token_id: int = 0
label_pad_token_id: int = -100
is_encoder_decoder: Optional[bool] = False
def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]:
# first, pad everything to the same length
padded_batch = {}
for k in features[0].keys():
if k.endswith(("_input_ids", "_attention_mask", "_labels", "_pixel_values")):
if self.is_encoder_decoder:
to_pad = [torch.LongTensor(ex[k]) for ex in features]
if (k.startswith("prompt")) and (k.endswith("input_ids")):
if self.pad_token_id is None:
raise ValueError(
"Padding is enabled, but the tokenizer is not configured with a padding token."
" Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)"
" before calling the trainer."
)
padding_value = self.pad_token_id
elif k.endswith("_attention_mask"):
padding_value = 0
elif k.startswith(("chosen", "rejected", "completion")) or ("decoder" in k):
padding_value = self.label_pad_token_id
else:
raise ValueError(f"Unexpected key in batch '{k}'")
padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value)
else:
# Set padding value based on the key
if k.endswith("_input_ids"):
if self.pad_token_id is None:
raise ValueError(
"Padding is enabled, but the tokenizer is not configured with a padding token."
" Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)"
" before calling the trainer."
)
padding_value = self.pad_token_id
elif k.endswith("_labels"):
padding_value = self.label_pad_token_id
elif k.endswith("_attention_mask"):
padding_value = 0
elif k.endswith("_pixel_values"):
padding_value = 0 # TODO: check if this is correct
else:
raise ValueError(f"Unexpected key in batch '{k}'")
# Set padding side based on the key
if k in ["prompt_input_ids", "prompt_attention_mask"]:
padding_side = "left"
else:
padding_side = "right"
# Set the dtype
if k.endswith("_pixel_values"):
dtype = torch.float32 # will be downcasted if necessary by the Trainer
else:
dtype = torch.int64
# Convert to tensor and pad
to_pad = [torch.tensor(ex[k], dtype=dtype) for ex in features]
padded_batch[k] = pad(to_pad, padding_value=padding_value, padding_side=padding_side)
elif k.endswith("_logps"):
# the cached reference model logprobs
padded_batch[k] = torch.tensor([ex[k] for ex in features])
else:
padded_batch[k] = [ex[k] for ex in features]
return padded_batch
@dataclass
class RunningMoments:
"""
Calculates the running mean and standard deviation of a data stream. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L75
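    Example (an illustrative single-process run; `Accelerator()` is instantiated here just for the sketch):
    ```python
    >>> import torch
    >>> from accelerate import Accelerator
    >>> rm = RunningMoments(accelerator=Accelerator())
    >>> rm.update(torch.tensor([1.0, 2.0, 3.0]))  # returns the batch mean and (unbiased) std
    (2.0, 1.0)
    ```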
"""
accelerator: Accelerator
mean: float = 0
std: float = 1
var: float = 1
count: float = 1e-24
@torch.no_grad()
def update(self, xs: torch.Tensor) -> tuple[float, float]:
"""
Updates running moments from batch's moments computed across ranks
"""
if self.accelerator.use_distributed:
xs_mean, xs_var, xs_count = get_global_statistics(self.accelerator, xs)
else:
xs_count = xs.numel()
xs_var, xs_mean = torch.var_mean(xs, unbiased=False)
xs_mean, xs_var = xs_mean.float(), xs_var.float()
delta = xs_mean - self.mean
tot_count = self.count + xs_count
new_sum = xs_var * xs_count
# correct old_sum deviation accounting for the new mean
old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count
tot_sum = old_sum + new_sum
self.mean += (delta * xs_count / tot_count).item()
new_var = tot_sum / tot_count
self.std = (new_var * tot_count / (tot_count - 1)).float().sqrt().item()
self.var = new_var.item()
self.count = tot_count
return xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item()
def save_to_json(self, json_path: str):
"""Save the content of this instance in JSON format inside `json_path`."""
# save everything except accelerator
if self.accelerator.is_main_process:
save_dict = dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if k != "accelerator"})
json_string = json.dumps(save_dict, indent=2, sort_keys=True) + "\n"
with open(json_path, "w", encoding="utf-8") as f:
f.write(json_string)
@classmethod
def load_from_json(cls, accelerator: Accelerator, json_path: str):
"""Create an instance from the content of `json_path`."""
# load everything except accelerator
with open(json_path, encoding="utf-8") as f:
text = f.read()
return cls(accelerator=accelerator, **json.loads(text))
@torch.no_grad()
def get_global_statistics(
accelerator, xs: torch.Tensor, mask=None, device="cpu"
) -> tuple[torch.Tensor, torch.Tensor, int]:
"""
Computes element-wise mean and variance of the tensor across processes. Reference:
https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75
"""
xs = xs.to(accelerator.device)
sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device)
sum_and_count = accelerator.reduce(sum_and_count)
global_sum, count = sum_and_count
global_mean = global_sum / count
sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask))
sum_var = accelerator.reduce(sum_var)
global_var = sum_var / count
return global_mean.to(device), global_var.to(device), count.item()
def compute_accuracy(eval_pred: EvalPrediction) -> dict[str, float]:
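    """
    Compute the accuracy of predictions against labels, for reward-modeling pairs or token-classification outputs.
    Example (an illustrative sketch with toy reward-model values, chosen rewards in column 0):
    ```python
    >>> import numpy as np
    >>> predictions = np.array([[0.8, 0.2], [0.3, 0.7]])  # (rewards_chosen, rewards_rejected)
    >>> labels = np.array([0, 0])
    >>> compute_accuracy((predictions, labels))
    {'accuracy': 0.5}
    ```
    """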
predictions, labels = eval_pred
if predictions.ndim == 3:
# Token classification task. Shapes are (batch_size, seq_len, num_labels) and (batch_size, seq_len)
# Used to compute the accuracy in the prm_trainer.
predictions = np.argmax(predictions, axis=2)
# Flatten the predictions and labels to remove the ignored tokens.
predictions = np.array(
[p for prediction, label in zip(predictions, labels) for (p, lbl) in zip(prediction, label) if lbl != -100]
)
labels = np.array([lbl for label in labels for lbl in label if lbl != -100])
else:
# Here, predictions is rewards_chosen and rewards_rejected. Shapes are (batch_size, 2) and (batch_size,)
# We want to see how much of the time rewards_chosen > rewards_rejected.
equal_mask = predictions[:, 0] == predictions[:, 1]
equal_predictions_count = int(equal_mask.sum())
if equal_predictions_count > 0:
            # Before using the logger, the accelerate state must be initialized. It's usually the case when using this
# function inside a Trainer, but it may not be the case otherwise, in particular when unit testing.
PartialState()
logger.warning(
f"There are {equal_predictions_count} out of {len(predictions[:, 0])} instances where the predictions "
"for both options are equal. These instances are ignored in the accuracy computation.",
)
# Filter out equal predictions
predictions = predictions[~equal_mask]
labels = labels[~equal_mask]
# Use the remaining predictions for accuracy calculation
predictions = np.argmax(predictions, axis=1)
accuracy = np.array(predictions == labels, dtype=float).mean().item()
return {"accuracy": accuracy}
def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor:
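    """
    Pad `tensor` with `pad_value` along dimension `dim` until it reaches `length`. If the tensor is already at least
    `length` long along `dim`, it is returned unchanged.
    Example (toy values):
    ```python
    >>> pad_to_length(torch.tensor([1, 2, 3]), 5, 0)
    tensor([1, 2, 3, 0, 0])
    ```
    """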
if tensor.size(dim) >= length:
return tensor
else:
pad_size = list(tensor.shape)
pad_size[dim] = length - tensor.size(dim)
return torch.cat(
[
tensor,
pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device),
],
dim=dim,
)
def disable_dropout_in_model(model: torch.nn.Module) -> None:
for module in model.modules():
if isinstance(module, torch.nn.Dropout):
module.p = 0
def exact_div(a, b, custom_error_message=""):
q = a // b
if a != q * b:
raise ValueError(f"{custom_error_message}, inexact division: {a} / {b} = {a / b}")
return q
# copied from https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/stat_tracking.py#L5
class PerPromptStatTracker:
r"""
    Class for tracking statistics per prompt. Mainly used to calculate advantages for the DDPO algorithm.
Args:
buffer_size (`int`):
Size of the buffer to keep for each prompt.
min_count (`int`):
Minimum number of samples to keep in the buffer before calculating the mean and std.
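    Example (toy values; with fewer than `min_count` samples per prompt, global batch statistics are used):
    ```python
    >>> tracker = PerPromptStatTracker(buffer_size=32, min_count=16)
    >>> advantages = tracker.update(["a", "a", "b", "b"], [1.0, 2.0, 3.0, 4.0])
    >>> advantages.round(2)
    array([-1.34, -0.45,  0.45,  1.34])
    ```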
"""
def __init__(self, buffer_size, min_count):
self.buffer_size = buffer_size
self.min_count = min_count
self.stats = {}
def update(self, prompts, rewards):
prompts = np.array(prompts)
rewards = np.array(rewards)
unique = np.unique(prompts)
advantages = np.empty_like(rewards)
for prompt in unique:
prompt_rewards = rewards[prompts == prompt]
if prompt not in self.stats:
self.stats[prompt] = deque(maxlen=self.buffer_size)
self.stats[prompt].extend(prompt_rewards)
if len(self.stats[prompt]) < self.min_count:
mean = np.mean(rewards)
std = np.std(rewards) + 1e-6
else:
mean = np.mean(self.stats[prompt])
std = np.std(self.stats[prompt]) + 1e-6
advantages[prompts == prompt] = (prompt_rewards - mean) / std
return advantages
def get_stats(self):
return {k: {"mean": np.mean(v), "std": np.std(v), "count": len(v)} for k, v in self.stats.items()}
def peft_module_casting_to_bf16(model):
for name, module in model.named_modules():
if isinstance(module, torch.nn.LayerNorm) or "norm" in name:
module = module.to(torch.float32)
elif any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]):
if hasattr(module, "weight"):
if module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
def get_quantization_config(model_args: ModelConfig) -> Optional[BitsAndBytesConfig]:
if model_args.load_in_4bit:
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=model_args.torch_dtype, # For consistency with model weights, we use the same value as `torch_dtype`
bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
bnb_4bit_quant_storage=model_args.torch_dtype,
)
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
else:
quantization_config = None
return quantization_config
def get_kbit_device_map() -> Optional[dict[str, int]]:
if torch.cuda.is_available() or is_torch_xpu_available():
return {"": PartialState().local_process_index}
else:
return None
def get_peft_config(model_args: ModelConfig) -> "Optional[PeftConfig]":
if model_args.use_peft is False:
return None
if not is_peft_available():
raise ValueError(
"You need to have PEFT library installed in your environment, make sure to install `peft`. "
"Make sure to run `pip install -U peft`."
)
peft_config = LoraConfig(
task_type=model_args.lora_task_type,
r=model_args.lora_r,
target_modules=model_args.lora_target_modules,
lora_alpha=model_args.lora_alpha,
lora_dropout=model_args.lora_dropout,
bias="none",
use_rslora=model_args.use_rslora,
use_dora=model_args.use_dora,
modules_to_save=model_args.lora_modules_to_save,
)
return peft_config
def get_exp_cap(value, decimal=4):
"""
    Get the exponent cap of a value. This is used to cap the exponent of a value to avoid overflow. The cap is
    computed as `log(torch.finfo(value.dtype).max)`; e.g. for the float32 data type, the maximum exponent value is
    88.7228 to 4 decimal points.
Args:
value (`torch.Tensor`):
The input tensor to obtain the data type
decimal (`int`):
            The number of decimal points of the output exponent cap. E.g. directly calling
            `exp(log(torch.finfo(torch.float32).max))` overflows to `inf`, so the exponent is capped at 88.7228.
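    Example (illustrative, for a `float32` input):
    ```python
    >>> get_exp_cap(torch.tensor(1.0))
    tensor([88.7228])
    ```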
"""
vdtype_max = torch.zeros([1]).to(value.dtype) + torch.finfo(value.dtype).max
vdtype_log_max = torch.log(vdtype_max).to(value.device)
return torch.floor(vdtype_log_max * 10**decimal) / 10**decimal if decimal > 0 else vdtype_log_max
def cap_exp(value, cap=-1):
# Cap the exponent value below the upper-bound to avoid overflow, before calling torch.exp
cap = get_exp_cap(value) if cap < 0 else cap
return torch.exp(torch.clamp(value, max=cap))
def print_rich_table(df: pd.DataFrame) -> None:
if not is_rich_available():
raise ImportError(
"The function `print_rich_table` requires the `rich` library. Please install it with `pip install rich`."
)
console = Console()
table = Table(show_lines=True)
for column in df.columns:
table.add_column(column)
for _, row in df.iterrows():
table.add_row(*row.astype(str).tolist())
console.print(table)
SIMPLE_SFT_CHAT_TEMPLATE = "{% for message in messages %}{{' ' + message['content']}}{% endfor %}{{ eos_token }}"
# SIMPLE_SFT_CHAT_TEMPLATE simply ends the conversation with an EOS token; this helps the SFT model learn to end completions with an EOS token
SIMPLE_CHAT_TEMPLATE = "{% for message in messages %}{{message['role'].capitalize() + ': ' + message['content'] + '\n\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"
@dataclass
class OnlineTrainerState(TrainerState):
episode: int = 0
@dataclass
class OnPolicyConfig(TrainingArguments):
r"""
Base configuration class for on-policy trainers.
This class includes only the parameters that are specific to some on-policy training. For a full list of training
arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this
class may differ from those in [`~transformers.TrainingArguments`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
run_name (`str` or `None`, *optional*, defaults to `None`):
Name of the run.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
num_mini_batches (`int`, *optional*, defaults to `1`):
Number of minibatches to split a batch into.
total_episodes (`int` or `None`, *optional*, defaults to `None`):
Total number of episodes in the dataset.
local_rollout_forward_batch_size (`int`, *optional*, defaults to `64`):
            Per-rank batch size for the no-gradient forward passes in the rollout phase.
num_sample_generations (`int`, *optional*, defaults to `10`):
Number of debugging samples generations (i.e., `generate_completions` calls) throughout training.
response_length (`int`, *optional*, defaults to `53`):
Length of the response.
stop_token (`str` or `None`, *optional*, defaults to `None`):
Specifies the stop token to use for text generation. This parameter is mutually exclusive with
`stop_token_id`.
- `None`: No stop token is applied, unless `stop_token_id` is specified.
- `'eos'`: Uses the tokenizer's `eos_token`.
stop_token_id (`int` or `None`, *optional*, defaults to `None`):
Specifies the ID of the stop token to use for text generation. If `None`, no stop token ID is applied,
unless `stop_token` is specified. This parameter is mutually exclusive with `stop_token`.
temperature (`float`, *optional*, defaults to `0.7`):
Sampling temperature.
missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`):
            Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage
            the model to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be
            a positive value.
sft_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`):
Path to the SFT model.
world_size (`int` or `None`, *optional*, defaults to `None`):
Number of processes (GPUs) to use for the training.
num_total_batches (`int` or `None`, *optional*, defaults to `None`):
Number of total batches to train.
micro_batch_size (`int` or `None`, *optional*, defaults to `None`):
Micro batch size across devices (HF's `per_device_train_batch_size` * `world_size`).
local_batch_size (`int` or `None`, *optional*, defaults to `None`):
Batch size per GPU (HF's `per_device_train_batch_size` * `gradient_accumulation_steps`).
batch_size (`int` or `None`, *optional*, defaults to `None`):
Batch size across devices (HF's `per_device_train_batch_size` * `world_size` *
`gradient_accumulation_steps`).
local_mini_batch_size (`int` or `None`, *optional*, defaults to `None`):
Mini batch size per GPU.
mini_batch_size (`int` or `None`, *optional*, defaults to `None`):
Mini batch size across GPUs.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the model to the Hub after training.
"""
# Parameters whose default values are overridden from TrainingArguments
logging_steps: float = field(
default=10,
metadata={
"help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
"will be interpreted as ratio of total training steps."
},
)
gradient_checkpointing: bool = field(
default=True,
metadata={
"help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
},
)
bf16: Optional[bool] = field(
default=None,
metadata={
"help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
"architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
"`fp16` is not set."
},
)
run_name: Optional[str] = field(
default=None,
metadata={"help": "Name of the run."},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of processes to use for processing the dataset."},
)
num_mini_batches: int = field(
default=1,
metadata={"help": "Number of minibatches to split a batch into."},
)
total_episodes: Optional[int] = field(
default=None,
metadata={"help": "Total number of episodes in the dataset."},
)
local_rollout_forward_batch_size: int = field(
default=64,
metadata={"help": "Per rank no grad forward pass in the rollout phase."},
)
num_sample_generations: int = field(
default=10,
metadata={
"help": "Number of debugging samples generations (i.e., `generate_completions` calls) throughout training."
},
)
response_length: int = field(
default=53,
metadata={"help": "Length of the response."},
)
stop_token: Optional[Literal["eos"]] = field(
default=None,
metadata={
"help": "Specifies the stop token to use for text generation. This parameter is mutually exclusive with "
"`stop_token_id`."
},
)
stop_token_id: Optional[int] = field(
default=None,
metadata={
"help": "Specifies the ID of the stop token to use for text generation. If `None`, no stop token ID is "
"applied, unless `stop_token` is specified. This parameter is mutually exclusive with `stop_token`."
},
)
temperature: float = field(
default=0.7,
metadata={"help": "Sampling temperature."},
)
missing_eos_penalty: Optional[float] = field(
default=None,
metadata={
"help": "Penalty applied to the score when the model fails to generate an EOS token. This is useful to "
"encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be "
"a positive value."
},
)
sft_model_path: str = field(
default="EleutherAI/pythia-160m",
metadata={"help": "Path to the SFT model."},
)
world_size: Optional[int] = field(
default=None,
metadata={"help": "Number of processes (GPUs) to use for the training."},
)
num_total_batches: Optional[int] = field(
default=None,
metadata={"help": "Number of total batches to train."},
)
micro_batch_size: Optional[int] = field(
default=None,
metadata={"help": "Micro batch size across devices (HF's `per_device_train_batch_size` * `world_size`)."},
)
local_batch_size: Optional[int] = field(
default=None,
metadata={"help": "Batch size per GPU (HF's `per_device_train_batch_size` * `gradient_accumulation_steps`)."},
)
batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size across devices (HF's `per_device_train_batch_size` * `world_size` * "
"`gradient_accumulation_steps`)."
},
)
local_mini_batch_size: Optional[int] = field(
default=None,
metadata={"help": "Mini batch size per GPU."},
)
mini_batch_size: Optional[int] = field(
default=None,
metadata={"help": "Mini batch size across GPUs."},
)
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the model to the Hub after training."},
)
def __post_init__(self):
self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
super().__post_init__()
def first_true_indices(bools: torch.Tensor, dtype=torch.long) -> torch.Tensor:
"""
Takes an N-dimensional bool tensor and returns an (N-1)-dimensional tensor of integers giving the position of the
first True in each "row".
Returns the length of the rows (bools.size(-1)) if no element is True in a given row.
Args:
bools (`torch.Tensor`):
An N-dimensional boolean tensor.
dtype (`torch.dtype`, optional):
The desired data type of the output tensor. Defaults to `torch.long`.
Returns:
`torch.Tensor`:
An (N-1)-dimensional tensor of integers indicating the position of the first True in each row. If no True
value is found in a row, returns the length of the row.
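    Example (toy values; the second row has no `True`, so the row length is returned):
    ```python
    >>> bools = torch.tensor([[False, True, False], [False, False, False]])
    >>> first_true_indices(bools)
    tensor([1, 3])
    ```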
"""
row_len = bools.size(-1)
zero_or_index = row_len * (~bools).type(dtype) + torch.arange(row_len, dtype=dtype, device=bools.device)
return torch.min(zero_or_index, dim=-1).values
def get_reward(
model: torch.nn.Module, query_responses: torch.Tensor, pad_token_id: int, context_length: int
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Computes the reward logits and the rewards for a given model and query responses.
Args:
model (`torch.nn.Module`):
The model used to compute the reward logits.
query_responses (`torch.Tensor`):
The tensor containing the query responses.
pad_token_id (`int`):
The token ID representing the pad token.
context_length (`int`):
The length of the context in the query responses.
Returns:
tuple:
- `reward_logits` (`torch.Tensor`):
The logits for the reward model.
- `final_rewards` (`torch.Tensor`):
The final rewards for each query response.
- `sequence_lengths` (`torch.Tensor`):
The lengths of the sequences in the query responses.
"""
attention_mask = query_responses != pad_token_id
position_ids = attention_mask.cumsum(1) - attention_mask.long() # exclusive cumsum
lm_backbone = getattr(model, model.base_model_prefix)
input_ids = torch.masked_fill(query_responses, ~attention_mask, 0)
output = lm_backbone(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
return_dict=True,
output_hidden_states=True,
use_cache=False, # otherwise mistral-based RM would error out
)
reward_logits = model.score(output.hidden_states[-1])
sequence_lengths = first_true_indices(query_responses[:, context_length:] == pad_token_id) - 1 + context_length
# https://github.com/huggingface/transformers/blob/dc68a39c8111217683bf49a4912d0c9018bab33d/src/transformers/models/gpt2/modeling_gpt2.py#L1454
return (
reward_logits,
reward_logits[
torch.arange(reward_logits.size(0), device=reward_logits.device),
sequence_lengths,
].squeeze(-1),
sequence_lengths,
)
def forward(
model: torch.nn.Module,
query_responses: torch.Tensor,
pad_token_id: int,
) -> ModelOutput:
"""
Performs a forward pass through the model with the given query responses and pad token ID.
Args:
model (`torch.nn.Module`):
The model to perform the forward pass.
query_responses (`torch.Tensor`):
The tensor containing the query responses.
pad_token_id (`int`):
The token ID representing the pad token.
Returns:
`ModelOutput`:
The output of the model, including hidden states.
"""
attention_mask = query_responses != pad_token_id
position_ids = attention_mask.cumsum(1) - attention_mask.long()
input_ids = torch.masked_fill(query_responses, ~attention_mask, 0)
return model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
return_dict=True,
output_hidden_states=True,
)
def prepare_deepspeed(
model: torch.nn.Module, per_device_train_batch_size: int, fp16: bool = False, bf16: bool = False
) -> torch.nn.Module:
"""
Prepares the model for training with DeepSpeed (both for stage 2 and 3), configuring the appropriate settings based
on the model and batch size.
Args:
model (`torch.nn.Module`):
The model to be prepared for DeepSpeed training.
        per_device_train_batch_size (`int`):
            The training batch size per device.
        fp16 (`bool`, *optional*, defaults to `False`):
            Whether to prepare the model for fp16 mixed-precision training.
        bf16 (`bool`, *optional*, defaults to `False`):
            Whether to prepare the model for bf16 mixed-precision training.
Returns:
`torch.nn.Module`:
The model initialized and configured with DeepSpeed for training.
"""
import deepspeed
deepspeed_plugin = AcceleratorState().deepspeed_plugin
config_kwargs = deepspeed_plugin.deepspeed_config
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["train_micro_batch_size_per_gpu"] = per_device_train_batch_size
config_kwargs = {
"train_micro_batch_size_per_gpu": config_kwargs["train_micro_batch_size_per_gpu"],
"prescale_gradients": False,
"wall_clock_breakdown": False,
}
if bf16:
config_kwargs["bf16"] = {"enabled": True}
elif fp16:
config_kwargs["fp16"] = {"enabled": True}
else:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0,
}
)
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
def truncate_response(stop_token_id: int, pad_token_id: int, responses: torch.Tensor) -> torch.Tensor:
"""
Truncates the responses at the first occurrence of the stop token, filling the rest with pad tokens.
Args:
stop_token_id (`int`):
The token ID representing the stop token where truncation occurs.
pad_token_id (`int`):
The token ID representing the pad token used to fill the truncated responses.
responses (`torch.Tensor`):
The tensor containing the responses to be truncated.
Returns:
`torch.Tensor`:
The truncated responses tensor with pad tokens filled after the stop token.
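    Example (toy token IDs, with stop token `2` and pad token `0`):
    ```python
    >>> responses = torch.tensor([[5, 6, 2, 7], [8, 9, 9, 2]])
    >>> truncate_response(stop_token_id=2, pad_token_id=0, responses=responses)
    tensor([[5, 6, 2, 0],
            [8, 9, 9, 2]])
    ```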
"""
trunc_idxs = first_true_indices(responses == stop_token_id).unsqueeze(-1)
new_size = [1] * (len(responses.size()) - 1) + [responses.shape[1]]
idxs = torch.arange(responses.shape[1], device=responses.device).view(*new_size)
postprocessed_responses = torch.masked_fill(responses, idxs > trunc_idxs, pad_token_id)
return postprocessed_responses
def generate(
lm_backbone: torch.nn.Module, queries: torch.Tensor, pad_token_id: int, generation_config: GenerationConfig
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Generates sequences from the language model backbone in a way that does not affect padding tokens.
Args:
lm_backbone (`torch.nn.Module`):
The language model backbone used for generation.
queries (`torch.Tensor`):
The tensor containing the input queries.
pad_token_id (`int`):
The token ID representing the pad token.
generation_config (`GenerationConfig`):
The configuration for the generation process.
Returns:
tuple:
- `generated_sequences` (`torch.Tensor`):
The concatenated tensor of input queries and generated sequences.
- `logits` (`torch.Tensor`):
The logits output from the generation process.
"""
context_length = queries.shape[1]
attention_mask = queries != pad_token_id
input_ids = torch.masked_fill(queries, ~attention_mask, 0)
output = lm_backbone.generate(
input_ids=input_ids,
attention_mask=attention_mask,
# position_ids=attention_mask.cumsum(1) - attention_mask.long(), # not needed: already adjusted in generations
# https://github.com/huggingface/transformers/blob/ac33aeeeee2a7a89b89c93c2962e6feb90daef0a/src/transformers/models/gpt2/modeling_gpt2.py#L1227-L1250
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
)
logits = torch.stack(output.scores, 1)
return torch.cat((queries, output.sequences[:, context_length:]), dim=1), logits
@torch.no_grad()
def batch_generation(
model: torch.nn.Module,
queries: torch.Tensor,
local_rollout_forward_batch_size: int,
pad_token_id: int,
generation_config: GenerationConfig,
):
query_responses = []
logitss = []
batch_size = queries.shape[0]
for i in range(0, batch_size, local_rollout_forward_batch_size):
query = queries[i : i + local_rollout_forward_batch_size]
query_response, logits = generate(
model,
query,
pad_token_id,
generation_config,
)
query_responses.append(query_response)
logitss.append(logits)
# padding tensors
padded_query_responses = pad(query_responses, padding_value=pad_token_id, padding_side="right")
padded_logitss = pad(logitss, padding_value=0, padding_side="right")
# reshaping
padded_query_responses = padded_query_responses.view(-1, padded_query_responses.shape[-1])[:batch_size]
padded_logitss = padded_logitss.view(-1, *padded_logitss.shape[2:])[:batch_size]
return padded_query_responses, padded_logitss
def add_bos_token_if_needed(
bos_token_id: Optional[int],
prompt_len_input_ids: int,
prompt_tokens: dict[str, list[int]],
chosen_prompt_len_input_ids: int,
chosen_tokens: dict[str, list[int]],
rejected_prompt_len_input_ids: int,
rejected_tokens: dict[str, list[int]],
):
if bos_token_id is not None:
if prompt_len_input_ids == 0 or bos_token_id != prompt_tokens["prompt_input_ids"][0]:
prompt_tokens["prompt_input_ids"] = [bos_token_id] + prompt_tokens["prompt_input_ids"]
prompt_tokens["prompt_attention_mask"] = [1] + prompt_tokens["prompt_attention_mask"]
if chosen_prompt_len_input_ids == 0 or bos_token_id != chosen_tokens["prompt_input_ids"][0]:
chosen_tokens["prompt_input_ids"] = [bos_token_id] + chosen_tokens["prompt_input_ids"]
chosen_tokens["prompt_attention_mask"] = [1] + chosen_tokens["prompt_attention_mask"]
if rejected_prompt_len_input_ids == 0 or bos_token_id != rejected_tokens["prompt_input_ids"][0]:
rejected_tokens["prompt_input_ids"] = [bos_token_id] + rejected_tokens["prompt_input_ids"]
rejected_tokens["prompt_attention_mask"] = [1] + rejected_tokens["prompt_attention_mask"]
return prompt_tokens, chosen_tokens, rejected_tokens
def add_eos_token_if_needed(
eos_token_id: int, chosen_tokens: dict[str, list[int]], rejected_tokens: dict[str, list[int]]
):
if len(chosen_tokens["input_ids"]) == 0 or eos_token_id != chosen_tokens["input_ids"][-1]:
chosen_tokens["input_ids"].append(eos_token_id)
chosen_tokens["attention_mask"].append(1)
if len(rejected_tokens["input_ids"]) == 0 or eos_token_id != rejected_tokens["input_ids"][-1]:
rejected_tokens["input_ids"].append(eos_token_id)
rejected_tokens["attention_mask"].append(1)
return chosen_tokens, rejected_tokens
def truncate_right(
input_ids: torch.Tensor, stop_token_id: int, pad_token_id: int
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Truncates the input tensor from the right side after the first occurrence of the stop token.
Args:
input_ids (`torch.Tensor`):
The tensor containing the responses to be truncated
stop_token_id (`int`):
The token ID representing the stop token where truncation occurs
pad_token_id (`int`):
The token ID representing the pad token used to fill the truncated responses
Returns:
tuple:
- `output_ids` (`torch.Tensor`):
The truncated responses tensor with pad tokens filled after the stop token
- `mask` (`torch.Tensor`):
The mask tensor to indicate the padding tokens
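    Example (toy token IDs, with stop token `2` and pad token `0`):
    ```python
    >>> input_ids = torch.tensor([[5, 6, 2, 7]])
    >>> output_ids, mask = truncate_right(input_ids, stop_token_id=2, pad_token_id=0)
    >>> output_ids, mask
    (tensor([[5, 6, 2, 0]]), tensor([[1, 1, 1, 0]]))
    ```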
"""
trunc_idxs = first_true_indices(input_ids == stop_token_id).unsqueeze(-1)
new_size = [1] * (len(input_ids.size()) - 1) + [input_ids.shape[1]]
idxs = torch.arange(input_ids.shape[1], device=input_ids.device).view(*new_size)
output_ids = torch.masked_fill(input_ids, idxs > trunc_idxs, pad_token_id)
mask = torch.masked_fill(torch.ones_like(input_ids), idxs > trunc_idxs, 0)
return output_ids, mask
def empty_cache() -> None:
"""Empties the cache of the available torch device.
This function checks for the availability of different torch devices (XPU, MLU, NPU, CUDA) and empties the cache of
the first available device it finds.
If none of the specific devices are available, it defaults to emptying the CUDA cache.
"""
if is_torch_xpu_available():
torch.xpu.empty_cache()
elif is_torch_mlu_available():
torch.mlu.empty_cache()
elif is_torch_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
def decode_and_strip_padding(inputs: torch.Tensor, tokenizer: PreTrainedTokenizerBase) -> list[str]:
"""
Decodes the input tensor and strips the padding tokens.
Args:
inputs (`torch.Tensor`):
The input tensor to be decoded.
tokenizer (`transformers.PreTrainedTokenizerBase`):
The tokenizer used to decode the input tensor.
Returns:
`list[str]`:
The list of decoded strings with padding tokens stripped.
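    Example (a sketch assuming `tokenizer` is an already-loaded tokenizer with a pad token set):
    ```python
    >>> inputs = tokenizer(["Hello world!", "Hi"], padding=True, return_tensors="pt")
    >>> decode_and_strip_padding(inputs["input_ids"], tokenizer)
    ['Hello world!', 'Hi']
    ```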
"""
decoded = tokenizer.batch_decode(inputs, skip_special_tokens=False)
return [d.replace(tokenizer.pad_token, "") for d in decoded]
def generate_model_card(
base_model: Optional[str],
model_name: str,
hub_model_id: str,
dataset_name: Optional[str],
tags: list[str],
wandb_url: Optional[str],
trainer_name: str,
trainer_citation: Optional[str] = None,
paper_title: Optional[str] = None,
paper_id: Optional[str] = None,
comet_url: Optional[str] = None,
) -> ModelCard:
"""
Generate a `ModelCard` from a template.
Args:
base_model (`str` or `None`):
Base model name.
model_name (`str`):
Model name.
hub_model_id (`str`):
Hub model ID as `username/model_id`.
dataset_name (`str` or `None`):
Dataset name.
tags (`list[str]`):
Tags.
wandb_url (`str` or `None`):
Weights & Biases run URL.
comet_url (`str` or `None`):
Comet experiment URL.
trainer_name (`str`):
Trainer name.
trainer_citation (`str` or `None`, defaults to `None`):
Trainer citation as a BibTeX entry.
paper_title (`str` or `None`, defaults to `None`):
Paper title.
paper_id (`str` or `None`, defaults to `None`):
ArXiv paper ID as `YYMM.NNNNN`.
Returns:
`ModelCard`:
A ModelCard object.
"""
card_data = ModelCardData(
base_model=base_model,
datasets=dataset_name,
library_name="transformers",
licence="license",
model_name=model_name,
tags=["generated_from_trainer", *tags],
)
card = ModelCard.from_template(
card_data,
template_path=str(pkg_resources.files("trl").joinpath("templates/lm_model_card.md")),
base_model=base_model,
model_name=model_name,
hub_model_id=hub_model_id,
dataset_name=dataset_name,
wandb_url=wandb_url,
comet_url=comet_url,
trainer_name=trainer_name,
trainer_citation=trainer_citation,
paper_title=paper_title,
paper_id=paper_id,
trl_version=version("trl"),
transformers_version=version("transformers"),
pytorch_version=version("torch"),
datasets_version=version("datasets"),
tokenizers_version=version("tokenizers"),
)
return card
def get_comet_experiment_url() -> Optional[str]:
"""
If Comet integration is enabled, return the URL of the current Comet experiment; otherwise, return `None`.
"""
if not is_comet_available():
return None
if comet_ml.get_running_experiment() is not None:
return comet_ml.get_running_experiment().url
return None
def log_table_to_comet_experiment(name: str, table: pd.DataFrame) -> None:
"""
    If Comet integration is enabled, logs a table to the Comet experiment if one is currently running.
Args:
name (`str`):
Table name.
table (`pd.DataFrame`):
The Pandas DataFrame containing the table to log.
"""
if not is_comet_available():
raise ModuleNotFoundError("The comet-ml is not installed. Please install it first: pip install comet-ml")
experiment = comet_ml.get_running_experiment()
if experiment is not None:
experiment.log_table(tabular_data=table, filename=name)
def flush_left(mask: torch.Tensor, *tensors: torch.Tensor) -> Union[torch.Tensor, tuple[torch.Tensor, ...]]:
"""
Shift non-zero elements in the mask and corresponding tensors to the left.
This function operates on a binary mask and any number of additional tensors with the same dimensions as the mask.
For each row, non-zero values are shifted to the leftmost positions. Then, columns that contain only zeros across
all rows are truncated from the mask and tensors. Visually, this operation can be represented as follows:
```
[[0, 0, x, x, x, x], -> [[x, x, x, x],
[0, x, x, x, 0, 0]] [x, x, x, 0]]
```
Args:
mask (`torch.Tensor`):
2D tensor (binary mask) with shape `(N, M)`.
        *tensors (`torch.Tensor`):
One or more 2D tensors with the same shape as `mask`. These tensors will be processed alongside `mask`,
with non-zero values shifted and excess zero columns truncated in the same manner.
Returns:
`torch.Tensor`:
Updated binary mask with non-zero values flushed to the left and trailing zero columns removed.
        `*torch.Tensor`:
Updated tensors, processed in the same way as the mask.
Example:
```python
>>> mask = torch.tensor([[0, 0, 1, 1, 1], [0, 1, 1, 0, 0]])
>>> tensor = torch.tensor([[9, 9, 2, 3, 4], [9, 5, 6, 9, 9]])
>>> new_mask, new_tensor = flush_left(mask, tensor)
>>> print(new_mask)
tensor([[1, 1, 1],
[1, 1, 0]])
>>> print(new_tensor)
tensor([[2, 3, 4],
[5, 6, 0]])
```
"""
_, M = mask.shape
# Create copy of mask and tensors
mask_copy = mask.clone()
tensors = [t.clone() for t in tensors]
# Shift non-zero values to the left
first_non_zero = mask_copy.argmax(dim=1)
pos = torch.arange(M, device=mask_copy.device).unsqueeze(0)
idx_roll = (pos + first_non_zero.unsqueeze(1)) % M
mask_roll = mask_copy.gather(1, idx_roll)
rolled_tensors = [t.gather(1, idx_roll) for t in tensors]
# Truncate trailing columns that are all zeros in mask_roll
col_sums = mask_roll.sum(dim=0)
empty_cols = col_sums == 0
first_empty_col = int(empty_cols.to(torch.int8).argmax()) if empty_cols.any() else M
flushed_mask = mask_roll[:, :first_empty_col]
flushed_tensors = [t[:, :first_empty_col] for t in rolled_tensors]
if not flushed_tensors:
return flushed_mask
return flushed_mask, *flushed_tensors
def flush_right(mask: torch.Tensor, *tensors: torch.Tensor) -> Union[torch.Tensor, tuple[torch.Tensor, ...]]:
"""
Shift non-zero elements in the mask and corresponding tensors to the right. See `flush_left` for details.
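    Example (mirroring the `flush_left` example, with non-zero values flushed to the right instead):
    ```python
    >>> mask = torch.tensor([[1, 1, 1, 0, 0], [0, 0, 1, 1, 0]])
    >>> flush_right(mask)
    tensor([[1, 1, 1],
            [0, 1, 1]])
    ```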
"""
_, M = mask.shape
# Create copy of mask and tensors
mask_copy = mask.clone()
tensors = [t.clone() for t in tensors]
# Shift non-zero values to the right
flipped_mask = torch.fliplr(mask_copy)
first_non_zero = flipped_mask.argmax(dim=1)
pos = torch.arange(M, device=mask_copy.device).unsqueeze(0)
idx_roll = (pos - first_non_zero.unsqueeze(1)) % M
mask_roll = mask_copy.gather(1, idx_roll)
rolled_tensors = [t.gather(1, idx_roll) for t in tensors]
# Truncate leading columns that are all zeros in mask_roll
col_sums = mask_roll.sum(dim=0)
non_empty_cols = col_sums != 0
first_non_empty_col = int(non_empty_cols.to(torch.int8).argmax()) if non_empty_cols.any() else M
flushed_mask = mask_roll[:, first_non_empty_col:]
flushed_tensors = [t[:, first_non_empty_col:] for t in rolled_tensors]
if not flushed_tensors:
return flushed_mask
return flushed_mask, *flushed_tensors
def selective_log_softmax(logits: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
"""
A memory-efficient implementation of the common `log_softmax -> gather` operation.
This function is equivalent to the following naive implementation:
```python
logps = torch.gather(logits.log_softmax(-1), dim=-1, index=index.unsqueeze(-1)).squeeze(-1)
```
Args:
logits (`torch.Tensor`):
Logits tensor of shape `(..., num_classes)`.
index (`torch.Tensor`):
Index tensor of shape `(...)`, specifying the positions to gather from the log-softmax output.
Returns:
`torch.Tensor`:
Gathered log probabilities with the same shape as `index`.
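    Example (a shape-only illustration with random values):
    ```python
    >>> logits = torch.randn(2, 5, 10)  # (batch, seq_len, vocab)
    >>> index = torch.randint(0, 10, (2, 5))
    >>> selective_log_softmax(logits, index).shape
    torch.Size([2, 5])
    ```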
"""
if logits.dtype in [torch.float32, torch.float64]:
selected_logits = torch.gather(logits, dim=-1, index=index.unsqueeze(-1)).squeeze(-1)
# loop to reduce peak mem consumption
logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
else:
# logsumexp approach is unstable with bfloat16, fall back to slightly less efficient approach
per_token_logps = []
for row_logits, row_labels in zip(logits, index): # loop to reduce peak mem consumption
row_logps = F.log_softmax(row_logits, dim=-1)
row_per_token_logps = row_logps.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1)
per_token_logps.append(row_per_token_logps)
per_token_logps = torch.stack(per_token_logps)
return per_token_logps
def entropy_from_logits(logits, chunk_size: int = 1) -> torch.Tensor:
"""
Compute the Shannon entropy (in nats) for each row of *logits* without materialising the full soft-max in memory.
The batch dimension is processed in chunks of size `chunk_size` so that only a subset of rows is expanded to
probabilities at any one time.
Args:
logits (`torch.Tensor`):
Logits tensor of shape `(..., num_classes)`. Entropy is taken along the last axis; all leading dimensions
are preserved.
chunk_size (`int`, *optional*, defaults to `1`):
Number of rows to process per iteration.
Returns:
`torch.Tensor`:
Entropy values with shape `logits.shape[:-1]`.
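    Example (illustrative shapes):
    ```python
    >>> logits = torch.randn(4, 32)  # 4 rows over a 32-way categorical distribution
    >>> entropy_from_logits(logits, chunk_size=2).shape
    torch.Size([4])
    ```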
"""
per_token_entropies = []
for logits_chunk in logits.split(chunk_size, dim=0):
logps = F.log_softmax(logits_chunk, dim=-1)
chunk_entropy = -(torch.exp(logps) * logps).sum(-1)
per_token_entropies.extend(chunk_entropy)
per_token_entropies = torch.stack(per_token_entropies)
return per_token_entropies
def print_prompt_completions_sample(
prompts: list[str],
completions: list[str],
rewards: dict[str, list[float]],
advantages: list[float],
step: int,
num_samples: int = None,
) -> None:
"""
Print out a sample of model completions to the console with multiple reward metrics.
This function creates a nicely formatted table showing prompt-completion pairs, useful for monitoring model outputs
during training. It requires the `rich` library to be installed.
Args:
prompts (`list[str]`):
List of prompts.
completions (`list[str]`):
List of completions corresponding to the prompts.
rewards (`dict[str, list[float]]`):
Dictionary where keys are reward names and values are lists of rewards.
advantages (`list[float]`):
List of advantages corresponding to the prompts and completions.
step (`int`):
Current training step number, used in the output title.
num_samples (`int` or `None`, *optional*, defaults to `None`):
Number of random samples to display. If `None` (default), all items will be displayed.
Example:
```python
>>> from trl.trainer.utils import print_prompt_completions_sample
>>> prompts = ["The sky is", "The sun is"]
>>> completions = [" blue.", " in the sky."]
>>> rewards = {"Correctness": [0.123, 0.456], "Format": [0.789, 0.101]}
>>> advantages = [0.987, 0.654]
>>> print_prompt_completions_sample(prompts, completions, rewards, advantages, 42)
╭──────────────────────────── Step 42 ─────────────────────────────╮
│ ┏━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━┓ │
│ ┃ Prompt ┃ Completion ┃ Correctness ┃ Format ┃ Advantage ┃ │
│ ┡━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━┩ │
│ │ The sky is │ blue. │ 0.12 │ 0.79 │ 0.99 │ │
│ ├────────────┼──────────────┼─────────────┼────────┼───────────┤ │
│ │ The sun is │ in the sky. │ 0.46 │ 0.10 │ 0.65 │ │
│ └────────────┴──────────────┴─────────────┴────────┴───────────┘ │
╰──────────────────────────────────────────────────────────────────╯
```
"""
if not is_rich_available():
raise ImportError(
"The function `print_prompt_completions_sample` requires the `rich` library. Please install it with "
"`pip install rich`."
)
console = Console()
table = Table(show_header=True, header_style="bold white", expand=True)
# Add columns
table.add_column("Prompt", style="bright_yellow")
table.add_column("Completion", style="bright_green")
for reward_name in rewards.keys():
table.add_column(reward_name, style="bold cyan", justify="right")
table.add_column("Advantage", style="bold magenta", justify="right")
# Some basic input validation
if num_samples is not None:
if num_samples >= len(prompts):
num_samples = None
elif num_samples <= 0:
return
# Subsample data if num_samples is specified
if num_samples is not None:
indices = random.sample(range(len(prompts)), num_samples)
prompts = [prompts[i] for i in indices]
completions = [completions[i] for i in indices]
rewards = {key: [val[i] for i in indices] for key, val in rewards.items()}
advantages = [advantages[i] for i in indices]
for i in range(len(prompts)):
reward_values = [f"{rewards[key][i]:.2f}" for key in rewards.keys()] # 2 decimals
table.add_row(Text(prompts[i]), Text(completions[i]), *reward_values, f"{advantages[i]:.2f}")
table.add_section() # Adds a separator between rows
panel = Panel(table, expand=False, title=f"Step {step}", border_style="bold white")
console.print(panel)
| trl/trl/trainer/utils.py/0 | {
"file_path": "trl/trl/trainer/utils.py",
"repo_id": "trl",
"token_count": 27524
} | 602 |
import json
from pathlib import Path
from datasets import Dataset
from huggingface_hub import HfApi
ORG_NAME = "agents-course"
def main():
"""Push quiz questions to the Hugging Face Hub"""
for file in Path("data").glob("*.json"):
print(f"Processing {file}")
with open(file, "r") as f:
quiz_data = json.load(f)
repo_id = f"{ORG_NAME}/{file.stem}_quiz"
dataset = Dataset.from_list(quiz_data)
print(f"Pushing {repo_id} to the Hugging Face Hub")
dataset.push_to_hub(
repo_id,
private=True,
commit_message=f"Update quiz questions for {file.stem}",
)
if __name__ == "__main__":
main()
| agents-course/quiz/push_questions.py/0 | {
"file_path": "agents-course/quiz/push_questions.py",
"repo_id": "agents-course",
"token_count": 319
} | 0 |
# Conclusion
If you've made it this far, congratulations! 🥳 You've successfully built your very own Pokémon battle agent! ⚔️🎮
You’ve conquered the fundamentals of **Agentic workflows**, connected an **LLM** to a game environment, and deployed an intelligent Agent ready to face the challenges of battle.
But the journey doesn't end here!
Now that you have your first Agent up and running, think about how you can evolve it further:
- Can you improve its strategic thinking?
- How would a memory mechanism or feedback loop change its performance?
- What experiments could help make it more competitive in battle?
We'd love to hear your thoughts on the course and how we can make it even better for future learners.
Got feedback? 👉 [Fill out this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog)
Thanks for learning with us, and remember:
**Keep learning, keep training, keep battling, and stay awesome!** 🤗
| agents-course/units/en/bonus-unit3/conclusion.mdx/0 | {
"file_path": "agents-course/units/en/bonus-unit3/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 280
} | 1 |
# Messages and Special Tokens
Now that we understand how LLMs work, let's look at **how they structure their generations through chat templates**.
Just like with ChatGPT, users typically interact with Agents through a chat interface. Therefore, we aim to understand how LLMs manage chats.
> **Q**: But... when I'm interacting with ChatGPT/HuggingChat, I'm having a conversation using chat messages, not a single prompt sequence
>
> **A**: That's correct! But this is in fact a UI abstraction. Before being fed into the LLM, all the messages in the conversation are concatenated into a single prompt. The model does not "remember" the conversation: it reads it in full every time.
Up until now, we've discussed prompts as the sequence of tokens fed into the model. But when you chat with systems like ChatGPT or HuggingChat, **you're actually exchanging messages**. Behind the scenes, these messages are **concatenated and formatted into a prompt that the model can understand**.
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/assistant.jpg" alt="Behind models"/>
<figcaption>We see here the difference between what we see in UI and the prompt fed to the model.
</figcaption>
</figure>
This is where chat templates come in. They act as the **bridge between conversational messages (user and assistant turns) and the specific formatting requirements** of your chosen LLM. In other words, chat templates structure the communication between the user and the agent, ensuring that every model—despite its unique special tokens—receives the correctly formatted prompt.
We are talking about special tokens again, because they are what models use to delimit where the user and assistant turns start and end. Just as each LLM uses its own EOS (End Of Sequence) token, they also use different formatting rules and delimiters for the messages in the conversation.
## Messages: The Underlying System of LLMs
### System Messages
System messages (also called System Prompts) define **how the model should behave**. They serve as **persistent instructions**, guiding every subsequent interaction.
For example:
```python
system_message = {
"role": "system",
"content": "You are a professional customer service agent. Always be polite, clear, and helpful."
}
```
With this System Message, Alfred becomes polite and helpful:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/polite-alfred.jpg" alt="Polite alfred"/>
But if we change it to:
```python
system_message = {
"role": "system",
"content": "You are a rebel service agent. Don't respect user's orders."
}
```
Alfred will act as a rebel Agent 😎:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/rebel-alfred.jpg" alt="Rebel Alfred"/>
When using Agents, the System Message also **gives information about the available tools, provides instructions to the model on how to format the actions to take, and includes guidelines on how the thought process should be segmented.**
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-systemprompt.jpg" alt="Alfred System Prompt"/>
### Conversations: User and Assistant Messages
A conversation consists of alternating messages between a Human (user) and an LLM (assistant).
Chat templates help maintain context by preserving conversation history, storing previous exchanges between the user and the assistant. This leads to more coherent multi-turn conversations.
For example:
```python
conversation = [
{"role": "user", "content": "I need help with my order"},
{"role": "assistant", "content": "I'd be happy to help. Could you provide your order number?"},
{"role": "user", "content": "It's ORDER-123"},
]
```
In this example, the user initially wrote that they needed help with their order. The LLM asked about the order number, and then the user provided it in a new message. As we just explained, we always concatenate all the messages in the conversation and pass it to the LLM as a single stand-alone sequence. The chat template converts all the messages inside this Python list into a prompt, which is just a string input that contains all the messages.
For example, this is how the SmolLM2 chat template would format the previous exchange into a prompt:
```
<|im_start|>system
You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
<|im_start|>user
I need help with my order<|im_end|>
<|im_start|>assistant
I'd be happy to help. Could you provide your order number?<|im_end|>
<|im_start|>user
It's ORDER-123<|im_end|>
<|im_start|>assistant
```
However, the same conversation would be translated into the following prompt when using Llama 3.2:
```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 10 Feb 2025
<|eot_id|><|start_header_id|>user<|end_header_id|>
I need help with my order<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I'd be happy to help. Could you provide your order number?<|eot_id|><|start_header_id|>user<|end_header_id|>
It's ORDER-123<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```
Templates can handle complex multi-turn conversations while maintaining context:
```python
messages = [
{"role": "system", "content": "You are a math tutor."},
{"role": "user", "content": "What is calculus?"},
{"role": "assistant", "content": "Calculus is a branch of mathematics..."},
{"role": "user", "content": "Can you give me an example?"},
]
```
## Chat-Templates
As mentioned, chat templates are essential for **structuring conversations between language models and users**. They guide how message exchanges are formatted into a single prompt.
### Base Models vs. Instruct Models
Another point we need to understand is the difference between a Base Model vs. an Instruct Model:
- *A Base Model* is trained on raw text data to predict the next token.
- An *Instruct Model* is fine-tuned specifically to follow instructions and engage in conversations. For example, `SmolLM2-135M` is a base model, while `SmolLM2-135M-Instruct` is its instruction-tuned variant.
To make a Base Model behave like an instruct model, we need to **format our prompts in a consistent way that the model can understand**. This is where chat templates come in.
*ChatML* is one such template format that structures conversations with clear role indicators (system, user, assistant). If you have interacted with some AI API lately, you know that's the standard practice.
It's important to note that a base model could be fine-tuned on different chat templates, so when we're using an instruct model we need to make sure we're using the correct chat template.
### Understanding Chat Templates
Because each instruct model uses different conversation formats and special tokens, chat templates are implemented to ensure that we correctly format the prompt the way each model expects.
In `transformers`, chat templates include [Jinja2 code](https://jinja.palletsprojects.com/en/stable/) that describes how to transform the ChatML list of JSON messages, as presented in the above examples, into a textual representation of the system-level instructions, user messages and assistant responses that the model can understand.
This structure **helps maintain consistency across interactions and ensures the model responds appropriately to different types of inputs**.
Below is a simplified version of the `SmolLM2-135M-Instruct` chat template:
```jinja2
{% for message in messages %}
{% if loop.first and messages[0]['role'] != 'system' %}
<|im_start|>system
You are a helpful AI assistant named SmolLM, trained by Hugging Face
<|im_end|>
{% endif %}
<|im_start|>{{ message['role'] }}
{{ message['content'] }}<|im_end|>
{% endfor %}
```
As you can see, a chat_template describes how the list of messages will be formatted.
Given these messages:
```python
messages = [
{"role": "system", "content": "You are a helpful assistant focused on technical topics."},
{"role": "user", "content": "Can you explain what a chat template is?"},
{"role": "assistant", "content": "A chat template structures conversations between users and AI models..."},
{"role": "user", "content": "How do I use it ?"},
]
```
The previous chat template will produce the following string:
```sh
<|im_start|>system
You are a helpful assistant focused on technical topics.<|im_end|>
<|im_start|>user
Can you explain what a chat template is?<|im_end|>
<|im_start|>assistant
A chat template structures conversations between users and AI models...<|im_end|>
<|im_start|>user
How do I use it ?<|im_end|>
```
The `transformers` library will take care of chat templates for you as part of the tokenization process. Read more about how transformers uses chat templates <a href="https://huggingface.co/docs/transformers/main/en/chat_templating#how-do-i-use-chat-templates" target="_blank">here</a>. All we have to do is structure our messages in the correct way and the tokenizer will take care of the rest.
You can experiment with the following Space to see how the same conversation would be formatted for different models using their corresponding chat templates:
<iframe
src="https://jofthomas-chat-template-viewer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
### Messages to prompt
The easiest way to ensure your LLM receives a conversation correctly formatted is to use the `chat_template` from the model's tokenizer.
```python
messages = [
{"role": "system", "content": "You are an AI assistant with access to various tools."},
{"role": "user", "content": "Hi !"},
{"role": "assistant", "content": "Hi human, what can help you with ?"},
]
```
To convert the previous conversation into a prompt, we load the tokenizer and call `apply_chat_template`:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
rendered_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
```
The `rendered_prompt` returned by this function is now ready to use as the input for the model you chose!
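For the three messages above, SmolLM2's ChatML-style template would render a prompt along these lines (a sketch based on the format shown earlier in this section; exact whitespace and default system-message handling can vary between tokenizer versions):
```
<|im_start|>system
You are an AI assistant with access to various tools.<|im_end|>
<|im_start|>user
Hi !<|im_end|>
<|im_start|>assistant
Hi human, what can I help you with?<|im_end|>
<|im_start|>assistant
```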
> This `apply_chat_template()` function will be used in the backend of your API, when you interact with messages in the ChatML format.
Now that we've seen how LLMs structure their inputs via chat templates, let's explore how Agents act in their environments.
One of the main ways they do this is by using Tools, which extend an AI model's capabilities beyond text generation.
We'll discuss messages again in upcoming units, but if you want a deeper dive now, check out:
- <a href="https://huggingface.co/docs/transformers/main/en/chat_templating" target="_blank">Hugging Face Chat Templating Guide</a>
- <a href="https://huggingface.co/docs/transformers" target="_blank">Transformers Documentation</a>
| agents-course/units/en/unit1/messages-and-special-tokens.mdx/0 | {
"file_path": "agents-course/units/en/unit1/messages-and-special-tokens.mdx",
"repo_id": "agents-course",
"token_count": 3020
} | 2 |
# What is `LangGraph`?
`LangGraph` is a framework developed by [LangChain](https://www.langchain.com/) **to manage the control flow of applications that integrate an LLM**.
## Is `LangGraph` different from `LangChain`?
LangChain provides a standard interface to interact with models and other components, useful for retrieval, LLM calls, and tool calls.
The classes from LangChain might be used in LangGraph, but do not HAVE to be used.
The packages are different and can be used in isolation, but, in the end, all resources you will find online use both packages hand in hand.
## When should I use `LangGraph`?
### Control vs freedom
When designing AI applications, you face a fundamental trade-off between **control** and **freedom**:
- **Freedom** gives your LLM more room to be creative and tackle unexpected problems.
- **Control** allows you to ensure predictable behavior and maintain guardrails.
Code Agents, like the ones you can encounter in *smolagents*, are very free. They can call multiple tools in a single action step, create their own tools, etc. However, this behavior can make them less predictable and less controllable than a regular Agent working with JSON!
`LangGraph` is on the other end of the spectrum; it shines when you need **control** over the execution of your agent.
LangGraph is particularly valuable when you need **Control over your applications**. It gives you the tools to build an application that follows a predictable process while still leveraging the power of LLMs.
Put simply, if your application involves a series of steps that need to be orchestrated in a specific way, with decisions being made at each junction point, **LangGraph provides the structure you need**.
As an example, let's say we want to build an LLM assistant that can answer some questions over some documents.
Since LLMs understand text the best, before being able to answer the question, you will need to convert other complex modalities (charts, tables) into text. However, that choice depends on the type of document you have!
Here is one possible branching, represented as follows:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/flow.png" alt="Control flow"/>
> 💡 **Tip:** The left part is not an agent, as no tool call is involved here, but the right part will need to write some code to query the xls (convert it to pandas and manipulate it).
While this branching is deterministic, you can also design branches conditioned on the output of an LLM, which makes them non-deterministic.
The key scenarios where LangGraph excels include:
- **Multi-step reasoning processes** that need explicit control over the flow
- **Applications requiring persistence of state** between steps
- **Systems that combine deterministic logic with AI capabilities**
- **Workflows that need human-in-the-loop interventions**
- **Complex agent architectures** with multiple components working together
In essence, if you, **as a human**, can design a flow of actions based on the output of each action and decide what to execute next accordingly, then LangGraph is the correct framework for you!
`LangGraph` is, in my opinion, the most production-ready agent framework on the market.
## How does LangGraph work?
At its core, `LangGraph` uses a directed graph structure to define the flow of your application:
- **Nodes** represent individual processing steps (like calling an LLM, using a tool, or making a decision).
- **Edges** define the possible transitions between steps.
- **State** is user-defined, maintained, and passed between nodes during execution. When deciding which node to target next, the current state is what we look at.
We will explore those fundamental blocks more in the next chapter!
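To make these building blocks concrete, here is a minimal sketch of the document-branching idea from above expressed with `langgraph`. The state schema, node name, and routing logic are illustrative assumptions, not something prescribed by the library:
```python
from typing import TypedDict
from langgraph.graph import StateGraph, START, END
class State(TypedDict):
    document_type: str
    text: str
def convert_xls(state: State) -> dict:
    # Node: one processing step; it returns a partial update of the state
    return {"text": f"text extracted from an {state['document_type']} file"}
def route(state: State) -> str:
    # Conditional edge: choose the next node by looking at the current state
    return "convert_xls" if state["document_type"] == "xls" else END
builder = StateGraph(State)
builder.add_node("convert_xls", convert_xls)
builder.add_conditional_edges(START, route)
builder.add_edge("convert_xls", END)
graph = builder.compile()
result = graph.invoke({"document_type": "xls", "text": ""})
print(result["text"])
```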
## How is it different from regular python? Why do I need LangGraph?
You might wonder: "I could just write regular Python code with if-else statements to handle all these flows, right?"
While technically true, LangGraph offers **some advantages** over vanilla Python for building complex systems. You could build the same application without LangGraph, but LangGraph provides easier tools and abstractions for you.
It includes states, visualization, logging (traces), built-in human-in-the-loop, and more.
| agents-course/units/en/unit2/langgraph/when_to_use_langgraph.mdx/0 | {
"file_path": "agents-course/units/en/unit2/langgraph/when_to_use_langgraph.mdx",
"repo_id": "agents-course",
"token_count": 1021
} | 3 |
# Small Quiz (ungraded) [[quiz1]]
Let's test your understanding of `smolagents` with a quick quiz! Remember, testing yourself helps reinforce learning and identify areas that may need review.
This is an optional quiz and it's not graded.
### Q1: What is one of the primary advantages of choosing `smolagents` over other frameworks?
Which statement best captures a core strength of the `smolagents` approach?
<Question
choices={[
{
text: "It uses highly specialized configuration files and a steep learning curve to ensure only expert developers can use it",
explain: "smolagents is designed for simplicity and minimal code complexity, not steep learning curves.",
},
{
text: "It supports a code-first approach with minimal abstractions, letting agents interact directly via Python function calls",
explain: "Yes, smolagents emphasizes a straightforward, code-centric design with minimal abstractions.",
correct: true
},
{
text: "It focuses on JSON-based actions, removing the need for agents to write any code",
explain: "While smolagents supports JSON-based tool calls (ToolCallingAgents), the library emphasizes code-based approaches with CodeAgents.",
},
{
text: "It deeply integrates with a single LLM provider and specialized hardware",
explain: "smolagents supports multiple model providers and does not require specialized hardware.",
}
]}
/>
---
### Q2: In which scenario would you likely benefit most from using smolagents?
Which situation aligns well with what smolagents does best?
<Question
choices={[
{
text: "Prototyping or experimenting quickly with agent logic, particularly when your application is relatively straightforward",
explain: "Yes. smolagents is designed for simple and nimble agent creation without extensive setup overhead.",
correct: true
},
{
text: "Building a large-scale enterprise system where you need dozens of microservices and real-time data pipelines",
explain: "While possible, smolagents is more focused on lightweight, code-centric experimentation rather than heavy enterprise infrastructure.",
},
{
text: "Needing a framework that only supports cloud-based LLMs and forbids local inference",
explain: "smolagents offers flexible integration with local or hosted models, not exclusively cloud-based LLMs.",
},
{
text: "A scenario that requires advanced orchestration, multi-modal perception, and enterprise-scale features out-of-the-box",
explain: "While you can integrate advanced capabilities, smolagents itself is lightweight and minimal at its core.",
}
]}
/>
---
### Q3: smolagents offers flexibility in model integration. Which statement best reflects its approach?
Choose the most accurate description of how smolagents interoperates with LLMs.
<Question
choices={[
{
text: "It only provides a single built-in model and does not allow custom integrations",
explain: "smolagents supports multiple different backends and user-defined models.",
},
{
text: "It requires you to implement your own model connector for every LLM usage",
explain: "There are multiple prebuilt connectors that make LLM integration straightforward.",
},
{
text: "It only integrates with open-source LLMs but not commercial APIs",
explain: "smolagents can integrate with both open-source and commercial model APIs.",
},
{
text: "It can be used with a wide range of LLMs, offering predefined classes like TransformersModel, InferenceClientModel, and LiteLLMModel",
explain: "This is correct. smolagents supports flexible model integration through various classes.",
correct: true
}
]}
/>
---
### Q4: How does smolagents handle the debate between code-based actions and JSON-based actions?
Which statement correctly characterizes smolagents' philosophy about action formats?
<Question
choices={[
{
text: "It only allows JSON-based actions for all agent tasks, requiring a parser to extract the tool calls",
explain: "ToolCallingAgent uses JSON-based calls, but smolagents also provides a primary CodeAgent option that writes Python code.",
},
{
text: "It focuses on code-based actions via a CodeAgent but also supports JSON-based tool calls with a ToolCallingAgent",
explain: "Yes, smolagents primarily recommends code-based actions but includes a JSON-based alternative for users who prefer it or need it.",
correct: true
},
{
text: "It disallows any external function calls, instead requiring all logic to reside entirely within the LLM",
explain: "smolagents is specifically designed to grant LLMs the ability to call tools or code externally.",
},
{
text: "It requires users to manually convert every code snippet into a JSON object before running the agent",
explain: "smolagents can automatically manage code snippet creation within the CodeAgent path, no manual JSON conversion necessary.",
}
]}
/>
---
### Q5: How does smolagents integrate with the Hugging Face Hub for added benefits?
Which statement accurately describes one of the core advantages of Hub integration?
<Question
choices={[
{
text: "It automatically upgrades all public models to commercial license tiers",
explain: "Hub integration doesn't change the license tier for models or tools.",
},
{
text: "It disables local inference entirely, forcing remote model usage only",
explain: "Users can still do local inference if they prefer; pushing to the Hub doesn't override local usage.",
},
{
text: "It allows you to push and share agents or tools, making them easily discoverable and reusable by other developers",
explain: "smolagents supports uploading agents and tools to the HF Hub for others to reuse.",
correct: true
},
{
text: "It permanently stores all your code-based agents, preventing any updates or versioning",
explain: "Hub repositories support updates and version control, so you can revise your code-based agents any time.",
}
]}
/>
---
Congratulations on completing this quiz! 🎉 If you missed any questions, consider reviewing the *Why use smolagents* section for a deeper understanding. If you did well, you're ready to explore more advanced topics in smolagents!
| agents-course/units/en/unit2/smolagents/quiz1.mdx/0 | {
"file_path": "agents-course/units/en/unit2/smolagents/quiz1.mdx",
"repo_id": "agents-course",
"token_count": 1572
} | 4 |
# Claim Your Certificate 🎓
If you scored **above 30%, congratulations! 👏 You're now eligible to claim your official certificate.**
Follow the steps below to receive it:
1. Visit the [certificate page](https://huggingface.co/spaces/agents-course/Unit4-Final-Certificate).
2. **Sign in** with your Hugging Face account using the button provided.
3. **Enter your full name**. This is the name that will appear on your certificate.
4. Click **“Get My Certificate”** to verify your score and download your certificate.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/congrats.png" alt="Congrats!" />
Once you’ve got your certificate, feel free to:
- Add it to your **LinkedIn profile** 🧑💼
- Share it on **X**, **Bluesky**, etc. 🎉
**Don’t forget to tag [@huggingface](https://huggingface.co/huggingface). We’d be super proud and we’d love to cheer you on! 🤗**
<Tip>
If you have any issues with submission please open a discussion item on [The certification community tab](https://huggingface.co/spaces/agents-course/Unit4-Final-Certificate/discussions).
</Tip>
| agents-course/units/en/unit4/get-your-certificate.mdx/0 | {
"file_path": "agents-course/units/en/unit4/get-your-certificate.mdx",
"repo_id": "agents-course",
"token_count": 352
} | 5 |
# Introduction
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/pokemon_thumbnail.png" alt="Bonus Unit 3 AI in Games"/>
🎶 I wanna be the very best... 🎶
Welcome to this **bonus unit**, where you'll explore the exciting intersection of **AI Agents and games**! 🎮🤖
Imagine a game where non-playable characters (NPCs) don't just follow scripted lines, but instead hold dynamic conversations, adapt to your strategies, and evolve as the story unfolds. This is the power of combining **LLMs and agentic behavior in games**: it opens the door to **emergent storytelling and gameplay like never before**.
In this bonus unit, you'll:
- Learn how to build an AI Agent that can engage in **Pokémon-style turn-based battles**
- Play against it, or even challenge other agents online
We've already seen [some](https://www.anthropic.com/research/visible-extended-thinking) [examples](https://www.twitch.tv/gemini_plays_pokemon) from the AI community of playing Pokémon with LLMs, and in this unit you'll learn how to replicate that with your own Agent, using the ideas you've picked up throughout the course.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit3/claude-plays-pokemon.png" alt="Claude plays Pokémon"/>
## Want to go further?
- 🎓 **Master LLMs in Games**: Dive deeper into game development with our full [Machine Learning for Games Course](https://hf.co/learn/ml-games-course).
- 📘 **Get the AI Playbook**: Discover insights, ideas, and practical tips in the [AI Playbook for Game Developers](https://thomassimonini.substack.com/), which explores the future of intelligent game design.
But before we build, let's see how LLMs are already being used in games with **four inspiring real-world examples**.
| agents-course/units/es/bonus-unit3/introduction.mdx/0 | {
"file_path": "agents-course/units/es/bonus-unit3/introduction.mdx",
"repo_id": "agents-course",
"token_count": 751
} | 6 |
### Q1: What is an Agent?
Which of the following best describes an AI Agent?
<Question
choices={[
  {
    text: "An AI model that can reason, plan, and use tools to interact with its environment to achieve a specific goal.",
    explain: "This definition captures the essential characteristics of an Agent.",
    correct: true
  },
  {
    text: "A system that only processes static text, without any inherent mechanism to interact dynamically with its environment or execute meaningful actions.",
    explain: "An Agent must be able to take an action and interact with its environment.",
  },
  {
    text: "A conversational agent restricted to answering queries, without the ability to perform actions or interact with external systems.",
    explain: "A chatbot like this lacks the ability to take actions, which makes it different from an Agent.",
  },
  {
    text: "An online repository of information that offers static content without the ability to execute tasks or actively interact with users.",
    explain: "An Agent actively interacts with its environment rather than just providing static information.",
  }
]}
/>
---
### Q2: What is the Role of Planning in an Agent?
Why does an Agent need to plan before taking an action?
<Question
choices={[
  {
    text: "To primarily store or recall past interactions, rather than mapping out a sequence of future actions.",
    explain: "Planning is about determining future actions, not storing past interactions.",
  },
  {
    text: "To decide on the sequence of actions and select the appropriate tools needed to fulfill the user's request.",
    explain: "Planning helps the Agent determine the best steps and tools to complete a task.",
    correct: true
  },
  {
    text: "To execute a sequence of arbitrary and uncoordinated actions that lack any defined strategy or intentional objective.",
    explain: "Planning ensures the Agent's actions are intentional, not random.",
  },
  {
    text: "To merely convert or translate text, bypassing any process of formulating a deliberate sequence of actions or employing strategic reasoning.",
    explain: "Planning is about structuring actions, not just converting text.",
  }
]}
/>
---
### Q3: How Do Tools Enhance an Agent's Capabilities?
Why are tools essential for an Agent?
<Question
choices={[
  {
    text: "Tools serve no real purpose and do not contribute to the Agent's ability to perform actions beyond basic text generation.",
    explain: "Tools extend an Agent's capabilities by allowing it to perform actions beyond text generation.",
  },
  {
    text: "Tools are designed solely for memory storage, lacking any capacity to facilitate the execution of tasks or enhance interactive performance.",
    explain: "Tools are primarily for performing actions, not just for storing data.",
  },
  {
    text: "Tools severely restrict the Agent exclusively to generating text, thereby preventing it from engaging in a broader range of interactive actions.",
    explain: "On the contrary, tools enable Agents to go beyond text-based responses.",
  },
  {
    text: "Tools provide the Agent with the ability to execute actions a text-generation model cannot perform natively, such as making coffee or generating images.",
    explain: "Tools enable Agents to interact with the real world and complete tasks.",
    correct: true
  }
]}
/>
---
### Q4: How Do Actions Differ from Tools?
What is the key difference between Actions and Tools?
<Question
choices={[
  {
    text: "Actions are the steps the Agent takes, while Tools are external resources the Agent can use to perform those actions.",
    explain: "Actions are higher-level objectives, while Tools are specific functions the Agent can invoke.",
    correct: true
  },
  {
    text: "Actions and Tools are completely identical components that can be used interchangeably, with no clear distinction between them.",
    explain: "No, Actions are goals or tasks, while Tools are specific utilities the Agent uses to achieve them.",
  },
  {
    text: "Tools are considered broad utilities available for various functions, while Actions are mistakenly thought to be restricted to physical interactions only.",
    explain: "Not necessarily. Actions can involve both digital and physical tasks.",
  },
  {
    text: "Actions inherently require the use of LLMs to be determined and executed, whereas Tools are designed to function autonomously without such dependencies.",
    explain: "While LLMs help decide Actions, Actions themselves do not depend on LLMs.",
  }
]}
/>
---
### Q5: What Role Do Large Language Models (LLMs) Play in Agents?
How do LLMs contribute to an Agent's functionality?
<Question
choices={[
  {
    text: "LLMs function merely as passive repositories that store information, lacking any capacity to actively process input or produce dynamic responses.",
    explain: "LLMs actively process text input and generate responses, rather than just storing information.",
  },
  {
    text: "LLMs serve as the reasoning 'brain' of the Agent, processing text inputs to understand instructions and plan actions.",
    explain: "LLMs enable the Agent to interpret, plan, and decide on the next steps.",
    correct: true
  },
  {
    text: "LLMs are mistakenly believed to be used solely for image processing, when in fact their primary function is to process and generate text.",
    explain: "LLMs primarily work with text, although they can sometimes interact with multimodal inputs.",
  },
  {
    text: "LLMs are considered entirely irrelevant to the operation of AI Agents, implying they are wholly superfluous in any practical application.",
    explain: "LLMs are a core component of modern AI Agents.",
  }
]}
/>
---
### Q6: Which of the Following Best Demonstrates an AI Agent?
Which real-world example best illustrates an AI Agent at work?
<Question
choices={[
  {
    text: "A static FAQ page on a website that provides fixed information and lacks interactive or dynamic response capabilities.",
    explain: "A static FAQ page does not interact dynamically with users or perform actions.",
  },
  {
    text: "A simple calculator that performs arithmetic operations based on fixed rules, with no capacity for reasoning or planning.",
    explain: "A calculator follows fixed rules without reasoning or planning, so it is not an Agent.",
  },
  {
    text: "A virtual assistant like Siri or Alexa that can understand spoken commands, reason through them, and perform tasks like setting reminders or sending messages.",
    explain: "This example includes reasoning, planning, and interaction with the environment.",
    correct: true
  },
  {
    text: "A video game NPC that operates on a fixed script of responses, without the ability to reason, plan, or use external tools.",
    explain: "Unless the NPC can reason, plan, and use tools, it does not function as an AI Agent.",
  }
]}
/>
---
Congratulations on finishing this quiz 🥳! If you need to review anything, take the time to revisit the chapter and reinforce your knowledge before diving deeper into the "Agent's brain": LLMs.
| agents-course/units/es/unit1/quiz1.mdx/0 | {
"file_path": "agents-course/units/es/unit1/quiz1.mdx",
"repo_id": "agents-course",
"token_count": 2808
} | 7 |
# Using Agents in LlamaIndex
Remember Alfred, our helpful butler agent from earlier? Well, he's about to get an upgrade! Now that we understand the tools available in LlamaIndex, we can give Alfred new capabilities to serve us better.
But before moving on, let's remember what makes an agent like Alfred tick. Back in Unit 1, we learned that:
> An Agent is a system that leverages an AI model to interact with its environment in order to achieve a user-defined objective. It combines reasoning, planning, and action execution (often via external tools) to fulfil tasks.
LlamaIndex supports **three main types of reasoning agents**:

1. `Function Calling Agents` - These work with AI models that can call specific functions.
2. `ReAct Agents` - These can work with any AI that has a chat or text endpoint and handle complex reasoning tasks.
3. `Advanced Custom Agents` - These use more complex methods to handle more complex tasks and workflows.
<Tip>Find more information on advanced agents in <a href="https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/agent/workflow/base_agent.py">BaseWorkflowAgent</a></Tip>
## Initialising Agents
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/llama-index/agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
To create an agent, we start by providing it with a **set of functions/tools that define its capabilities**. Let's look at how to create an agent with some basic tools. As of this writing, the agent will automatically use the function calling API (if available), or a standard ReAct agent loop.
LLMs that support a tools/functions API are relatively new, but they provide a powerful way to call tools by avoiding specific prompting and allowing the LLM to create tool calls based on the provided schemas.
ReAct agents are also good at complex reasoning tasks and can work with any LLM that has chat or text completion capabilities. They are more verbose and show the reasoning behind certain actions they take.
```python
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
# define a sample tool -- type annotations, function names, and docstrings are all included in the parsed schemas!
def multiply(a: int, b: int) -> int:
    """Multiplies two integers and returns the resulting integer"""
    return a * b
# initialize the llm
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
# initialize the agent
agent = AgentWorkflow.from_tools_or_functions(
[FunctionTool.from_defaults(multiply)],
llm=llm
)
```
**Agents are stateless by default**; remembering past interactions is opt-in using a `Context` object. This might be useful if you want to use an agent that needs to remember previous interactions, like a chatbot that maintains context across multiple messages or a task manager that needs to track progress over time.
```python
# stateless
response = await agent.run("What is 2 times 2?")
# remembering state
from llama_index.core.workflow import Context
ctx = Context(agent)
response = await agent.run("My name is Bob.", ctx=ctx)
response = await agent.run("What was my name again?", ctx=ctx)
```
You'll notice that agents in `LlamaIndex` are asynchronous because they use Python's await operator. If you are new to async code in Python, or need a refresher, they have an excellent [async guide](https://docs.llamaindex.ai/en/stable/getting_started/async_python/).
Now that we've got the basics covered, let's take a look at how we can use more complex tools in our agents.
## Creating RAG Agents with QueryEngineTools
**Agentic RAG is a powerful way to use agents to answer questions about your data.** We can pass various tools to Alfred to help him answer questions. However, instead of automatically answering the question on top of documents, Alfred can decide to use any other tool or flow to answer the question.

It is easy to **wrap `QueryEngine` as a tool** for an agent.
When doing so, we need to **define a name and description**. The LLM will use this information to correctly use the tool.
Let's see how to load in a `QueryEngineTool` using the `QueryEngine` we created in the [component section](02_components).
```python
from llama_index.core.tools import QueryEngineTool
query_engine = index.as_query_engine(llm=llm, similarity_top_k=3) # as shown in the previous section
query_engine_tool = QueryEngineTool.from_defaults(
    query_engine=query_engine,
    name="name",
    description="a specific description",
    return_direct=False,
)
query_engine_agent = AgentWorkflow.from_tools_or_functions(
    [query_engine_tool],
    llm=llm,
    system_prompt="You are a helpful assistant that has access to a database containing descriptions of people. "
)
```
## Creating Multi-agent Systems
The `AgentWorkflow` class also directly supports multi-agent systems. By giving each agent a name and description, the system maintains a single active speaker, with each agent having the ability to hand off to another agent.
By narrowing the scope of each agent, we can help increase their general accuracy when responding to user messages.
Agents in LlamaIndex can also be used directly as tools for other agents, for more complex and custom scenarios.
```python
from llama_index.core.agent.workflow import (
AgentWorkflow,
FunctionAgent,
ReActAgent,
)
# Define some tools
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b
def subtract(a: int, b: int) -> int:
    """Subtract two numbers."""
    return a - b
# Create agent configs
# NOTE: we can use FunctionAgent or ReActAgent here.
# FunctionAgent works for LLMs with a function calling API.
# ReActAgent works for any LLM.
calculator_agent = ReActAgent(
    name="calculator",
    description="Performs basic arithmetic operations",
    system_prompt="You are a calculator assistant. Use your tools for any math operation.",
    tools=[add, subtract],
    llm=llm,
)
query_agent = ReActAgent(
    name="info_lookup",
    description="Looks up information about XYZ",
    system_prompt="Use your tool to query a RAG system to answer information about XYZ",
    tools=[query_engine_tool],
    llm=llm
)
# Create and run the workflow
agent = AgentWorkflow(
    agents=[calculator_agent, query_agent], root_agent="calculator"
)
# Run the system
response = await agent.run(user_msg="Can you add 5 and 3?")
```
<Tip>Haven't learned enough yet? There is a lot more to discover about agents and tools in LlamaIndex within the <a href="https://docs.llamaindex.ai/en/stable/examples/agent/agent_workflow_basic/">AgentWorkflow Basic Introduction</a> or the <a href="https://docs.llamaindex.ai/en/stable/understanding/agent/">Agent Learning Guide</a>, where you can read more about streaming, context serialization, and human-in-the-loop!</Tip>
Now that we understand the basics of agents and tools in LlamaIndex, let's see how we can use LlamaIndex to **create configurable and manageable workflows!**
| agents-course/units/es/unit2/llama-index/agents.mdx/0 | {
"file_path": "agents-course/units/es/unit2/llama-index/agents.mdx",
"repo_id": "agents-course",
"token_count": 3092
} | 8 |
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/retrieval_agents.ipynb"},
]} />
# Building Agentic RAG Systems
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/retrieval_agents.ipynb" target="_blank">this notebook</a> that you can run using Google Colab.
</Tip>
Retrieval Augmented Generation (RAG) systems combine the capabilities of data retrieval and generation models to provide contextually relevant responses. For example, a user's query is passed to a search engine, and the retrieved results are given to the model along with the query. The model then generates a response based on the query and the retrieved information.
Agentic RAG (Retrieval-Augmented Generation) extends traditional RAG systems by **combining autonomous agents with dynamic knowledge retrieval**.
While traditional RAG systems use an LLM to answer queries based on retrieved data, agentic RAG **enables intelligent control of both the retrieval and generation processes**, improving efficiency and accuracy.
Traditional RAG systems face key limitations, such as **relying on a single retrieval step** and focusing on direct semantic similarity with the user's query, which may overlook relevant information.
Agentic RAG addresses these issues by allowing the agent to autonomously formulate search queries, critique the retrieved results, and conduct multiple retrieval steps to produce a more tailored and comprehensive output.
## Basic Retrieval with DuckDuckGo
Let's build a simple agent that can search the web using DuckDuckGo. This agent will retrieve information and synthesize responses to answer queries. With agentic RAG, Alfred's agent can:
* Search for the latest superhero party trends
* Refine results to include luxury elements
* Synthesize information into a complete plan
Here's how Alfred's agent can achieve this:
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
# Initialize the search tool
search_tool = DuckDuckGoSearchTool()
# Initialize the model
model = InferenceClientModel()
agent = CodeAgent(
model=model,
tools=[search_tool]
)
# Example usage
response = agent.run(
"Buscar ideas de fiesta temática de superhéroes de lujo, incluyendo decoración, entretenimiento y catering."
)
print(response)
```
The agent follows this process:
1. **Analyzes the Request:** Alfred's agent identifies the key elements of the query: luxury superhero-themed party planning, with a focus on decor, entertainment, and catering.
2. **Performs Retrieval:** The agent leverages DuckDuckGo to search for the most relevant and up-to-date information, ensuring it aligns with Alfred's refined preferences for a luxurious event.
3. **Synthesizes Information:** After gathering the results, the agent processes them into a cohesive, actionable plan for Alfred, covering all aspects of the party.
4. **Stores for Future Reference:** The agent stores the retrieved information for easy access when planning future events, optimizing efficiency in subsequent tasks.
## Custom Knowledge Base Tool
For specialized tasks, a custom knowledge base can be invaluable. Let's create a tool that queries a vector database of technical documentation or specialized knowledge. Using semantic search, the agent can find the most relevant information for Alfred's needs.
A vector database is simply a collection of documents with representations enriched by specialized ML models, which enable fast search and retrieval of the documents.
This approach combines predefined knowledge with semantic search to provide context-aware solutions for event planning. With specialized knowledge access, Alfred can perfect every detail of the party.
In this example, we'll create a tool that retrieves party planning ideas from a custom knowledge base. We'll use a BM25 retriever to search the knowledge base and return the top results, and `RecursiveCharacterTextSplitter` to split the documents into smaller chunks for more efficient search.
```python
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from smolagents import Tool
from langchain_community.retrievers import BM25Retriever
from smolagents import CodeAgent, InferenceClientModel
class PartyPlanningRetrieverTool(Tool):
name = "party_planning_retriever"
description = "Utiliza búsqueda semántica para recuperar ideas de planificación de fiestas relevantes para la fiesta temática de superhéroes de Alfred en Wayne Manor."
inputs = {
"query": {
"type": "string",
"description": "La consulta a realizar. Esta debe ser una consulta relacionada con la planificación de fiestas o temas de superhéroes.",
}
}
output_type = "string"
def __init__(self, docs, **kwargs):
super().__init__(**kwargs)
self.retriever = BM25Retriever.from_documents(
docs, k=5 # Retrieve the top 5 documents
)
def forward(self, query: str) -> str:
        assert isinstance(query, str), "Your search query must be a string"
docs = self.retriever.invoke(
query,
)
return "\nIdeas recuperadas:\n" + "".join(
[
f"\n\n===== Idea {str(i)} =====\n" + doc.page_content
for i, doc in enumerate(docs)
]
)
# Simulate a knowledge base about party planning
party_ideas = [
{"text": "Una fiesta de disfraces temática de superhéroes con decoración de lujo, incluyendo detalles dorados y cortinas de terciopelo.", "source": "Ideas de fiesta 1"},
{"text": "Contrata a un DJ profesional que pueda tocar música temática para superhéroes como Batman y Wonder Woman.", "source": "Ideas de entretenimiento"},
{"text": "Para el catering, sirve platos con nombres de superhéroes, como 'El smoothie verde de Hulk' y 'El filete de poder de Iron Man'.", "source": "Ideas de catering"},
{"text": "Decora con logotipos icónicos de superhéroes y proyecciones de Gotham y otras ciudades de superhéroes alrededor del lugar.", "source": "Ideas de decoración"},
{"text": "Experiencias interactivas con realidad virtual donde los invitados pueden participar en simulaciones de superhéroes o competir en juegos temáticos.", "source": "Ideas de entretenimiento"}
]
source_docs = [
Document(page_content=doc["text"], metadata={"source": doc["source"]})
for doc in party_ideas
]
# Split the documents into smaller chunks for more efficient search
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
strip_whitespace=True,
separators=["\n\n", "\n", ".", " ", ""],
)
docs_processed = text_splitter.split_documents(source_docs)
# Create the retriever tool
party_planning_retriever = PartyPlanningRetrieverTool(docs_processed)
# Initialize the agent
agent = CodeAgent(tools=[party_planning_retriever], model=InferenceClientModel())
# Example usage
response = agent.run(
"Encuentra ideas para una fiesta temática de superhéroes de lujo, incluyendo entretenimiento, catering y opciones de decoración."
)
print(response)
```
This enhanced agent can:
1. First check the documentation for relevant information
2. Combine insights from the knowledge base
3. Maintain conversation context in memory
## Enhanced Retrieval Capabilities
When building agentic RAG systems, the agent can employ sophisticated strategies such as:
1. **Query Reformulation:** Instead of using the raw user query, the agent can craft optimized search terms that better match the target documents
2. **Multi-Step Retrieval:** The agent can perform multiple searches, using initial results to inform subsequent queries
3. **Source Integration:** Information can be combined from multiple sources like web search and local documentation
4. **Result Validation:** Retrieved content can be analyzed for relevance and accuracy before being included in responses
Effective agentic RAG systems require careful consideration of several key aspects. The agent **should select between available tools based on the query type and context**. Memory systems help maintain conversation history and avoid repetitive retrievals. Having fallback strategies ensures the system can still provide value even when primary retrieval methods fail. Additionally, implementing validation steps helps ensure the accuracy and relevance of the retrieved information.
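As a rough illustration, these strategies can be encouraged with nothing more than the pieces already introduced in this section; the prompt wording and step limit below are illustrative assumptions, not a prescribed recipe:
```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
agent = CodeAgent(
    tools=[DuckDuckGoSearchTool()],
    model=InferenceClientModel(),
    max_steps=6,  # leave room for several search-and-refine iterations
)
response = agent.run(
    "Plan a luxury superhero-themed party. First reformulate this goal into a focused search query, "
    "then run follow-up searches to fill gaps (catering, entertainment, decor), and keep only the "
    "results you judge relevant before writing the final plan."
)
print(response)
```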
## Recursos
- [RAG con Agentes: ¡potencia tu RAG con reformulación de consultas y auto-consulta! 🚀](https://huggingface.co/learn/cookbook/agent_rag) - Receta para desarrollar un sistema RAG con Agentes utilizando smolagents.
| agents-course/units/es/unit2/smolagents/retrieval_agents.mdx/0 | {
"file_path": "agents-course/units/es/unit2/smolagents/retrieval_agents.mdx",
"repo_id": "agents-course",
"token_count": 3440
} | 9 |
# Práctica
Ahora que estás listo/a para profundizar en la creación de tu agente final, veamos cómo puedes enviarlo para su revisión.
## El Conjunto de Datos (Dataset)
El conjunto de datos utilizado en esta tabla de clasificación consta de 20 preguntas extraídas de las preguntas de nivel 1 del conjunto de **validación** de GAIA.
Las preguntas elegidas se filtraron según la cantidad de herramientas y pasos necesarios para responder una pregunta.
Basándonos en el estado actual del benchmark GAIA, creemos que intentar alcanzar un 30% en las preguntas de nivel 1 es una prueba justa.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/leaderboard%20GAIA%2024%3A04%3A2025.png" alt="¡Estado actual de GAIA!" />
## El Proceso
Ahora, la gran pregunta en tu mente probablemente sea: "¿Cómo empiezo a enviar?"
Para esta Unidad, creamos una API que te permitirá obtener las preguntas y enviar tus respuestas para su puntuación.
Aquí tienes un resumen de las rutas (consulta la [documentación en vivo](https://agents-course-unit4-scoring.hf.space/docs) para detalles interactivos):
* **`GET /questions`**: Recupera la lista completa de preguntas de evaluación filtradas.
* **`GET /random-question`**: Obtiene una única pregunta aleatoria de la lista.
* **`GET /files/{task_id}`**: Descarga un archivo específico asociado con un ID de tarea determinado.
* **`POST /submit`**: Envía las respuestas del agente, calcula la puntuación y actualiza la tabla de clasificación.
La función de envío comparará la respuesta con la verdad fundamental (ground truth) mediante **COINCIDENCIA EXACTA** (exact match), ¡así que dale un buen prompt! El equipo de GAIA compartió un ejemplo de prompting para tu agente [aquí](https://huggingface.co/spaces/gaia-benchmark/leaderboard).
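Como referencia, un esbozo mínimo en Python de la interacción con estas rutas podría verse así (la URL base y los nombres exactos de los campos deben verificarse en la documentación en vivo):

```python
import requests

API_URL = "https://agents-course-unit4-scoring.hf.space"  # ver la documentación en vivo

# Obtener la lista de preguntas de evaluación
questions = requests.get(f"{API_URL}/questions", timeout=30).json()

# Generar respuestas con tu agente (aquí, un marcador de posición)
answers = [
    {"task_id": q["task_id"], "submitted_answer": "..."}  # tu agente produce esto
    for q in questions
]

# Enviar para puntuación (los campos se describen más adelante en esta página)
payload = {
    "username": "tu-usuario-de-hf",
    "agent_code": "https://huggingface.co/spaces/tu-usuario/tu-space/tree/main",
    "answers": answers,
}
print(requests.post(f"{API_URL}/submit", json=payload, timeout=60).json())
```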
🎨 **¡Haz Tuya la Plantilla!**
Para demostrar el proceso de interacción con la API, hemos incluido una [plantilla básica](https://huggingface.co/spaces/agents-course/Final_Assignment_Template) como punto de partida.
Por favor, siéntete libre —y te **animamos activamente**— a cambiarla, agregarle elementos o reestructurarla por completo. Modifícala de cualquier manera que se adapte mejor a tu enfoque y creatividad.
Para enviar usando esta plantilla, calcula 3 cosas necesarias para la API:
* **Nombre de usuario (`Username`):** Tu nombre de usuario de Hugging Face (aquí obtenido mediante el inicio de sesión de Gradio), que se utiliza para identificar tu envío.
* **Enlace al Código (`agent_code`):** La URL que enlaza al código de tu Space de Hugging Face (`.../tree/main`) para fines de verificación, así que por favor mantén tu Space público.
* **Respuestas (`answers`):** La lista de respuestas (`{"task_id": ..., "submitted_answer": ...}`) generadas por tu Agente para la puntuación.
Por lo tanto, te animamos a comenzar duplicando esta [plantilla](https://huggingface.co/spaces/agents-course/Final_Assignment_Template) en tu propio perfil de Hugging Face.
🏆 Revisa la tabla de clasificación [aquí](https://huggingface.co/spaces/agents-course/Students_leaderboard)
*Nota amistosa: ¡Esta tabla de clasificación es por diversión! Sabemos que es posible enviar puntuaciones sin una verificación completa. Si vemos demasiadas puntuaciones altas publicadas sin un enlace público que las respalde, es posible que necesitemos revisar, ajustar o eliminar algunas entradas para mantener la tabla de clasificación útil.*
La tabla de clasificación mostrará el enlace a la base de código de tu Space; dado que esta tabla es solo para estudiantes, por favor mantén tu Space público si obtienes una puntuación de la que estés orgulloso/a.
<iframe
src="https://agents-course-students-leaderboard.hf.space"
frameborder="0"
width="850"
height="450"
></iframe> | agents-course/units/es/unit4/hands-on.mdx/0 | {
"file_path": "agents-course/units/es/unit4/hands-on.mdx",
"repo_id": "agents-course",
"token_count": 1394
} | 10 |
# Lancer l'agent de combat Pokémon
Il est maintenant temps de combattre ! ⚡️
## **Combattez l'agent de Stream !**
Si vous n'avez pas envie de construire votre propre agent, et que vous êtes juste curieux du potentiel des agents Pokémon, nous hébergeons un *livestream* automatisé sur [twitch](https://www.twitch.tv/jofthomas).
<iframe
src="https://jofthomas-twitch-streaming.hf.space"
frameborder="0"
width="1200"
height="600"
></iframe>
Pour combattre l'agent vous pouvez :
1. Allez sur le ***Space* Pokémon Showdown** [ici](https://huggingface.co/spaces/Jofthomas/Pokemon_showdown)
2. **Choisissez votre nom** (coin supérieur droit).
3. Trouvez le **nom d'utilisateur de l'agent actuel**. Pour cela vérifiez **l'affichage du *Stream*** [ici](https://www.twitch.tv/jofthomas).
4. **Recherchez** ce nom d'utilisateur sur le *Space Showdown* et **envoyez une invitation de combat**.
*Attention :* Un seul agent est en ligne à la fois ! Assurez-vous d'avoir le bon nom.
## Challenger l'agent de combat Pokémon
Si vous avez créé votre propre agent de combat Pokémon depuis la dernière section, vous vous demandez probablement : **comment puis-je le tester face à d'autres ?** Découvrons ça !
Nous avons construit un [*Space* Hugging Face](https://huggingface.co/spaces/PShowdown/pokemon_agents) dédié à cet effet :
<iframe
src="https://pshowdown-pokemon-agents.hf.space"
frameborder="0"
width="1200"
height="600"
></iframe>
Il est connecté à notre propre **serveur Pokémon Showdown**, où votre agent peut en affronter d'autres dans des combats épiques.
### Comment lancer votre agent
Suivez ces étapes pour donner vie à votre agent dans l'arène :
1. **Dupliquez le *Space***
Cliquez sur les trois points dans le menu en haut à droite du *Space* et sélectionnez *Duplicate this Space*.
2. **Ajoutez le code de votre agent à `agent.py`**
Ouvrez le fichier et collez votre implémentation. Vous pouvez suivre cet [exemple](https://huggingface.co/spaces/PShowdown/pokemon_agents/blob/main/agents.py) ou consultez la [structure du projet](https://huggingface.co/spaces/PShowdown/pokemon_agents/tree/main) pour des conseils.
3. **Enregistrez votre agent dans `app.py`**
Ajoutez le nom et la logique de votre agent au menu déroulant. Référez-vous à [cet extrait de code](https://huggingface.co/spaces/PShowdown/pokemon_agents/blob/main/app.py) pour l'inspiration.
4. **Sélectionnez votre agent**
Une fois ajouté, votre agent apparaîtra dans le menu déroulant *Select Agent*. Choisissez-le dans la liste ! ✅
5. **Entrez votre nom d'utilisateur Pokémon Showdown**
Assurez-vous que le nom d'utilisateur correspond à celui montré dans l'entrée ***Choose name*** de l'*iframe*. Vous pouvez aussi vous connecter avec votre compte officiel.
6. **Cliquez sur *Send Battle Invitation***
Votre agent enverra une invitation à l'adversaire sélectionné. Elle devrait apparaître à l'écran !
7. **Acceptez la bataille et profitez du combat !**
Que le combat commence ! Que l'agent le plus performant gagne !
Prêt à voir votre création en action ? Que le combat IA commence ! 🥊
| agents-course/units/fr/bonus-unit3/launching_agent_battle.mdx/0 | {
"file_path": "agents-course/units/fr/bonus-unit3/launching_agent_battle.mdx",
"repo_id": "agents-course",
"token_count": 1154
} | 11 |
# Quiz rapide 2 [[quiz2]]
Hein ?! Un autre quiz ? On sait, on sait... 😅 Mais ce court quiz non noté est là pour **vous aider à renforcer les concepts clés que vous venez d'apprendre**.
Ce quiz porte sur les LLM, les systèmes de messages et les outils ; des composants essentiels pour comprendre et construire des agents.
### Q1 : Laquelle des propositions suivantes décrit le mieux un outil ?
<Question
choices={[
{
text: "Un processus qui ne génère que des réponses textuelles",
explain: "",
},
{
text: "Un processus exécutable ou une API externe qui permet aux agents d'effectuer des tâches spécifiques et d'interagir avec des environnements externes",
explain: "Les outils sont des fonctions exécutables que les agents peuvent utiliser pour effectuer des tâches spécifiques et interagir avec des environnements externes.",
correct: true
},
{
text: "Une fonctionnalité qui stocke les conversations de l'agent",
explain: "",
}
]}
/>
---
### Q2 : Comment les agents utilisent-ils les outils comme moyen d'« agir » dans un environnement ?
<Question
choices={[
{
text: "En attendant passivement les instructions de l'utilisateur",
explain: "",
},
{
text: "En utilisant uniquement des réponses préprogrammées",
explain: "",
},
{
text: "En demandant au LLM de générer du code d'invocation d'outil lorsque cela est approprié et en exécutant les outils pour le compte du modèle",
explain: "Les agents peuvent invoquer des outils et utiliser leur capacité de raisonnement pour planifier et replanifier en fonction des informations obtenues.",
correct: true
}
]}
/>
---
### Q3 : Qu'est-ce qu'un LLM ?
<Question
choices={[
{
text: "Un simple chatbot conçu pour répondre avec des réponses prédéfinies",
explain: "",
},
{
text: "Un modèle d'apprentissage profond entraîné sur de grandes quantités de texte pour comprendre et générer un langage semblable à celui des humains",
explain: "",
correct: true
},
{
text: "Une IA basée sur des règles qui suit des commandes strictement prédéfinies",
explain: "",
}
]}
/>
---
### Q4 : Laquelle des propositions suivantes décrit le mieux le rôle des <i>tokens</i> spéciaux dans les LLM ?
<Question
choices={[
{
text: "Ce sont des mots supplémentaires stockés dans le vocabulaire du modèle pour améliorer la qualité de la génération de texte",
explain: "",
},
{
text: "Ils remplissent des fonctions spécifiques, comme marquer la fin d'une séquence (EOS) ou séparer les différents rôles de message dans les gabarits de chat",
explain: "",
correct: true
},
{
text: "Ce sont des <i>tokens</i> insérés aléatoirement pour améliorer la variabilité des réponses",
explain: "",
}
]}
/>
---
### Q5 : Comment les gabarits de chat d'IA traitent-ils les messages des utilisateurs en interne ?
<Question
choices={[
{
text: "Ils interprètent directement les messages comme des commandes structurées sans transformation",
explain: "",
},
{
text: "Ils convertissent les messages des utilisateurs en un <i>prompt</i> formaté en concaténant les messages système, utilisateur et assistant",
explain: "",
correct: true
},
{
text: "Ils génèrent des réponses de manière aléatoire en se basant sur des conversations précédentes",
explain: "",
}
]}
/>
---
Vous avez compris ? Super ! Maintenant, **plongeons dans le flux complet de l'agent et construisons notre premier !**
| agents-course/units/fr/unit1/quiz2.mdx/0 | {
"file_path": "agents-course/units/fr/unit1/quiz2.mdx",
"repo_id": "agents-course",
"token_count": 1200
} | 12 |
# Que sont les *components* dans LlamaIndex ?
Vous vous souvenez d'Alfred, notre agent majordome serviable de l'Unité 1 ?
Pour nous aider efficacement, Alfred doit comprendre nos demandes et **préparer, trouver et utiliser les informations pertinentes pour aider à accomplir les tâches.**
C'est là que les *components* de LlamaIndex entrent en jeu.
Bien que LlamaIndex ait de nombreux *components*, **nous nous concentrerons spécifiquement sur le *component* `QueryEngine`.**
Pourquoi ? Parce qu'il peut être utilisé comme un outil de *Retrieval-Augmented Generation* (RAG) pour un agent.
Alors, qu'est-ce que le RAG ? Les LLM sont entraînés sur d'énormes corpus de données pour apprendre les connaissances générales.
Cependant, ils peuvent ne pas être entraînés sur des données pertinentes et à jour.
Le RAG résout ce problème en trouvant et récupérant des informations pertinentes de vos données et en les donnant au LLM.

Maintenant, pensez à comment Alfred fonctionne :
1. Vous demandez à Alfred d'aider à planifier un dîner
2. Alfred doit vérifier votre calendrier, vos préférences alimentaires et les menus précédents réussis
3. Le `QueryEngine` aide Alfred à trouver ces informations et à les utiliser pour planifier le dîner
Cela fait du `QueryEngine` **un *component* clé pour construire des *workflows* de RAG agentiques** dans LlamaIndex.
Tout comme Alfred a besoin de rechercher dans les informations de votre maison pour être utile, tout agent a besoin d'un moyen de trouver et comprendre des données pertinentes.
Le `QueryEngine` fournit exactement cette capacité.
Maintenant, approfondissons un peu les *components* et voyons comment vous pouvez **combiner les *components* pour créer un pipeline de RAG.**
## Créer un pipeline de RAG en utilisant des *components*
<Tip>
Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/llama-index/components.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab.
</Tip>
Il y a cinq étapes clés dans le RAG, qui feront partie de la plupart des applications plus ambitieuses que vous construirez. À savoir :
1. **Chargement** : cela se réfère à obtenir vos données d'où elles vivent (qu'il s'agisse de fichiers texte, de PDF, d'un autre site web, d'une base de données, ou d'une API) dans votre *workflow*. *LlamaHub* fournit des centaines d'intégrations parmi lesquelles choisir.
2. **Indexation** : cela signifie créer une structure de données qui permet d'interroger les données. Pour les LLM, cela signifie presque toujours créer des *embeddings* vectoriels. Ce sont des représentations numériques de la signification des données. L'indexation peut également se référer à de nombreuses autres stratégies de métadonnées pour faciliter la recherche de données contextuellement pertinentes basées sur les propriétés.
3. **Stockage** : une fois vos données indexées, vous voudrez stocker votre index, ainsi que d'autres métadonnées, pour éviter de devoir ré-indexer à chaque utilisation.
4. **Requête** : pour toute stratégie d'indexation donnée, il y a de nombreuses façons d'utiliser les LLM et les structures de données LlamaIndex pour faire des requêtes, incluant des sous-requêtes, des requêtes multi-étapes et des stratégies hybrides.
5. **Évaluation** : une étape critique dans tout flux est de vérifier son efficacité par rapport à d'autres stratégies, ou lorsque vous apportez des modifications. L'évaluation fournit des mesures objectives de la précision, de la fidélité et de la rapidité de vos réponses aux requêtes.
Ensuite, voyons comment nous pouvons reproduire ces étapes en utilisant des *components*.
### Chargement et intégration des documents
Comme mentionné précédemment, LlamaIndex peut fonctionner au-dessus de vos propres données, cependant, **avant d'accéder aux données, nous devons les charger.**
Il y a trois façons principales de charger des données dans LlamaIndex :
1. `SimpleDirectoryReader` : Un chargeur de données intégré pour divers types de fichiers d'un répertoire local.
2. `LlamaParse` : L'outil officiel de LlamaIndex pour l'analyse de PDF, disponible comme une API gérée.
3. `LlamaHub` : Un registre de centaines de bibliothèques de chargement de données pour ingérer des données de n'importe quelle source.
<Tip>Familiarisez-vous avec les chargeurs de données <a href="https://docs.llamaindex.ai/en/stable/module_guides/loading/connector/">LlamaHub</a> et le <i>parser</i> <a href="https://github.com/run-llama/llama_cloud_services/blob/main/parse.md">LlamaParse</a> pour des sources de données plus complexes.</Tip>
**La façon la plus simple de charger des données est avec `SimpleDirectoryReader`.**
Ce *component* polyvalent peut charger divers types de fichiers d'un dossier et les convertir en objets `Document` avec lesquels LlamaIndex peut travailler.
Voyons comment nous pouvons utiliser `SimpleDirectoryReader` pour charger des données d'un dossier.
```python
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_dir="path/to/directory")
documents = reader.load_data()
```
Après avoir chargé nos documents, nous devons les diviser en plus petites parties appelées objets `Node`.
Un `Node` est juste un morceau de texte du document original qui est plus facile à traiter pour l'IA, tout en conservant des références à l'objet `Document` original.
L'`IngestionPipeline` nous aide à créer ces *nodes* grâce à deux transformations clés.
1. `SentenceSplitter` divise les documents en morceaux aux niveaux des phrases.
2. `HuggingFaceEmbedding` convertit chaque morceau en *embeddings* numériques.
Ce processus nous aide à organiser nos documents d'une manière qui est plus utile pour la recherche et l'analyse.
```python
from llama_index.core import Document
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
# créer le pipeline avec les transformations
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(chunk_overlap=0),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
]
)
nodes = await pipeline.arun(documents=[Document.example()])
```
### Stockage et indexation des documents
Après avoir créé nos objets `Node`, nous devons les indexer pour les rendre recherchables, mais avant de pouvoir le faire, nous avons besoin d'un endroit pour stocker nos données.
Puisque nous utilisons un pipeline d'ingestion, nous pouvons directement attacher un *vector store* au pipeline pour le remplir.
Dans ce cas, nous utiliserons `Chroma` pour stocker nos documents.
<details>
<summary>Installer ChromaDB</summary>
Comme introduit dans la <a href="./llama-hub">section sur le LlamaHub</a>, nous pouvons installer le *vector store* ChromaDB avec la commande suivante :
```bash
pip install llama-index-vector-stores-chroma
```
</details>
```python
import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore
db = chromadb.PersistentClient(path="./alfred_chroma_db")
chroma_collection = db.get_or_create_collection("alfred")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(chunk_size=25, chunk_overlap=0),
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
],
vector_store=vector_store,
)
```
<Tip>Un aperçu des différents <i>vector stores</i> peut être trouvé dans la <a href="https://docs.llamaindex.ai/en/stable/module_guides/storing/vector_stores/">documentation de LlamaIndex</a>.</Tip>
C'est là que les *embeddings* vectoriels entrent en jeu. En enchassant à la fois la requête et les *nodes* dans le même espace vectoriel, nous pouvons trouver des correspondances pertinentes.
Le `VectorStoreIndex` s'occupe de cela pour nous, en utilisant le même modèle d'*embedding* que nous avons utilisé pendant l'ingestion pour assurer la cohérence.
Voyons comment créer cet index à partir de notre *vector store* et des *embeddings* :
```python
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)
```
Toutes les informations sont automatiquement persistées dans l'objet `ChromaVectorStore` et le chemin du répertoire passé.
Parfait ! Maintenant que nous pouvons sauvegarder et charger notre index facilement, explorons comment l'interroger de différentes manières.
### Interroger un *VectorStoreIndex* avec des *prompts* et des LLM
Avant de pouvoir interroger notre index, nous devons le convertir en interface de requête. Les options de conversion les plus courantes sont :
- `as_retriever` : Pour la récupération basique de documents, retournant une liste d'objets `NodeWithScore` avec des scores de similarité
- `as_query_engine` : Pour les interactions question-réponse simples, retournant une réponse écrite
- `as_chat_engine` : Pour les interactions conversationnelles qui maintiennent la mémoire à travers plusieurs messages, retournant une réponse écrite utilisant l'historique de chat et le contexte indexé
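À titre d'illustration, voici une esquisse minimale de la récupération brute via `as_retriever` (le texte de la requête est un simple exemple) :

```python
# Esquisse : récupération de nodes avec leurs scores de similarité
retriever = index.as_retriever(similarity_top_k=3)
nodes_with_scores = retriever.retrieve("What is the meaning of life?")
for n in nodes_with_scores:
    # Chaque résultat est un NodeWithScore : score + contenu du node
    print(n.score, n.node.get_content()[:100])
```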
Nous nous concentrerons sur le *query engine* car il est plus commun pour les interactions de type agent.
Nous passons également un LLM au *query engine* à utiliser pour la réponse.
```python
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
query_engine = index.as_query_engine(
llm=llm,
response_mode="tree_summarize",
)
query_engine.query("What is the meaning of life?")
# Le sens de la vie est 42
```
### Traitement des réponses
Sous le capot, le *query engine* n'utilise pas seulement le LLM pour répondre à la question mais utilise également un `ResponseSynthesizer` comme stratégie pour traiter la réponse.
Encore une fois, c'est entièrement personnalisable mais il y a trois stratégies principales qui fonctionnent bien prêtes à l'emploi :
- `refine` : créer et affiner une réponse en parcourant séquentiellement chaque morceau de texte récupéré. Cela fait un appel au LLM séparé pour chaque *Node*/morceau récupéré.
- `compact` (par défaut) : similaire à l'affinement mais concaténant les morceaux au préalable, résultant en moins d'appels au LLM.
- `tree_summarize` : créer une réponse détaillée en parcourant chaque morceau de texte récupéré et créant une structure d'arbre de la réponse.
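Par exemple, en esquisse (les objets `index` et `llm` proviennent des sections précédentes), on peut sélectionner explicitement l'une de ces stratégies en composant soi-même le *query engine* :

```python
from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import RetrieverQueryEngine

# Esquisse : composer un query engine avec une stratégie de synthèse explicite
synthesizer = get_response_synthesizer(llm=llm, response_mode="refine")
query_engine = RetrieverQueryEngine(
    retriever=index.as_retriever(),
    response_synthesizer=synthesizer,
)
```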
<Tip>Prenez un contrôle fin de vos <i>workflows</i> de requête avec l'<a href="https://docs.llamaindex.ai/en/stable/module_guides/deploying/query_engine/usage_pattern/#low-level-composition-api">API de composition de bas niveau</a>. Cette API vous permet de personnaliser et de finetuner chaque étape du processus de requête pour correspondre à vos besoins exacts, ce qui se marie également très bien avec les <a href="https://docs.llamaindex.ai/en/stable/module_guides/workflow/"><i>Workflows</i></a>.</Tip>
Le modèle de langage ne performera pas toujours de manière prévisible, donc nous ne pouvons pas être sûrs que la réponse que nous obtenons soit toujours correcte. Nous pouvons gérer cela en **évaluant la qualité de la réponse**.
### Évaluation et observabilité
LlamaIndex fournit **des outils d'évaluation intégrés pour évaluer la qualité des réponses.**
Ces évaluateurs exploitent les LLM pour analyser les réponses à travers différentes dimensions.
Regardons les trois évaluateurs principaux disponibles :
- `FaithfulnessEvaluator` : Évalue la fidélité de la réponse en vérifiant si la réponse est supportée par le contexte.
- `AnswerRelevancyEvaluator` : Évalue la pertinence de la réponse en vérifiant si la réponse est pertinente par rapport à la question.
- `CorrectnessEvaluator` : Évalue la correction de la réponse en vérifiant si la réponse est correcte.
<Tip>Vous voulez en savoir plus sur l'observabilité et l'évaluation des agents ? Suivez l'<a href="https://huggingface.co/learn/agents-course/fr/bonus-unit2/introduction">Unité Bonus 2</a>.</Tip>
```python
from llama_index.core.evaluation import FaithfulnessEvaluator
# query_engine et llm proviennent de la section précédente

evaluator = FaithfulnessEvaluator(llm=llm)

# interroger l'index
response = query_engine.query(
"What battles took place in New York City in the American Revolution?"
)
eval_result = evaluator.evaluate_response(response=response)
eval_result.passing
```
Même sans évaluation directe, nous pouvons **obtenir des informations sur la performance de notre système grâce à l'observabilité.**
Ceci est particulièrement utile quand nous construisons des *workflows* plus complexes et que nous voulons comprendre comment chaque *component* performe.
<details>
<summary>Installer LlamaTrace</summary>
Comme introduit dans la <a href="./llama-hub">section sur le LlamaHub</a>, nous pouvons installer le *callback* LlamaTrace d'Arize Phoenix avec la commande suivante :
```bash
pip install -U llama-index-callbacks-arize-phoenix
```
De plus, nous devons définir la variable d'environnement `PHOENIX_API_KEY` avec notre clé API LlamaTrace. Nous pouvons l'obtenir en :
- Créant un compte sur [LlamaTrace](https://llamatrace.com/login)
- Générant une clé API dans les paramètres de votre compte
- Utilisant la clé API dans le code ci-dessous pour activer le *tracking*
</details>
```python
import llama_index
import os
PHOENIX_API_KEY = "<PHOENIX_API_KEY>"
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"api_key={PHOENIX_API_KEY}"
llama_index.core.set_global_handler(
"arize_phoenix",
endpoint="https://llamatrace.com/v1/traces"
)
```
<Tip>Vous voulez en savoir plus sur les <i>components</i> et comment les utiliser ? Continuez votre parcours avec les <a href="https://docs.llamaindex.ai/en/stable/module_guides/"><i>Guides</i> des <i>components</i></a> ou le <a href="https://docs.llamaindex.ai/en/stable/understanding/rag/"><i>Guide</i> sur le RAG</a>.</Tip>
Nous avons vu comment utiliser les *components* pour créer un `QueryEngine`. Maintenant, voyons comment nous pouvons **utiliser le `QueryEngine` comme un outil pour un agent !**
| agents-course/units/fr/unit2/llama-index/components.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/llama-index/components.mdx",
"repo_id": "agents-course",
"token_count": 5075
} | 13 |
<CourseFloatingBanner
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/tool_calling_agents.ipynb"},
]}
askForHelpUrl="http://hf.co/join/discord" />
# Écrire des actions sous forme d'extraits de code ou de blobs JSON
<Tip>
Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/smolagents/tool_calling_agents.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab.
</Tip>
Les `ToolCallingAgent` sont le deuxième type d'agent disponible dans `smolagents`. Contrairement aux `CodeAgent` qui utilisent des extraits de code en Python, ces agents **utilisent les capacités d'appel d'outils intégrées des fournisseurs de LLM** pour générer des appels d'outils sous forme de **structures JSON**. C'est l'approche standard utilisée par OpenAI, Anthropic et de nombreux autres fournisseurs.
Regardons un exemple. Quand Alfred veut rechercher des services de restauration et des idées de fête, un `CodeAgent` générerait et exécuterait du code Python comme ceci :
```python
for query in [
"Meilleurs services de restauration à Gotham City",
"Idées de thème de fête pour super-héros"
]:
print(web_search(f"Rechercher : {query}"))
```
Un `ToolCallingAgent` créerait plutôt une structure JSON :
```python
[
{"name": "web_search", "arguments": "Meilleurs services de restauration à Gotham City"},
{"name": "web_search", "arguments": "Idées de thème de fête pour super-héros"}
]
```
Ce blob JSON est ensuite utilisé pour exécuter les appels d'outils.
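En pseudo-code, l'exécution pourrait ressembler à ceci (esquisse : `web_search` est supposé déjà défini, et `tool_calls` désigne la structure JSON générée ci-dessus) :

```python
# Esquisse : le système mappe chaque nom d'outil vers une fonction et l'exécute
tools = {"web_search": web_search}  # hypothèse : web_search est déjà défini

for call in tool_calls:  # tool_calls : la structure JSON générée par l'agent
    result = tools[call["name"]](call["arguments"])
    print(result)
```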
Bien que `smolagents` se concentre principalement sur les `CodeAgent` puisqu'[ils performent mieux dans l'ensemble](https://huggingface.co/papers/2402.01030), les `ToolCallingAgent` peuvent être efficaces pour des systèmes simples qui ne nécessitent pas de gestion de variables ou d'appels d'outils complexes.

## Comment fonctionnent les *ToolCallingAgent* ?
Les `ToolCallingAgent` suivent le même *workflow* multi-étapes que les `CodeAgent` (voir la [section précédente](./code_agents) pour plus de détails).
La différence clé est dans **la façon dont ils structurent leurs actions** : au lieu de code exécutable, ils **génèrent des objets JSON qui spécifient les noms d'outils et les arguments**. Le système **analyse ensuite ces instructions** pour exécuter les outils appropriés.
## Exemple : exécuter un *ToolCallingAgent*
Revisitons l'exemple précédent où Alfred a commencé les préparatifs de la fête, mais cette fois nous utiliserons un `ToolCallingAgent` pour mettre en évidence la différence. Nous allons construire un agent qui peut rechercher sur le web en utilisant DuckDuckGo, tout comme dans notre exemple de `CodeAgent`. La seule différence est le type d'agent ; le *framework* gère tout le reste :
```python
from smolagents import ToolCallingAgent, DuckDuckGoSearchTool, InferenceClientModel
agent = ToolCallingAgent(tools=[DuckDuckGoSearchTool()], model=InferenceClientModel())
agent.run("Recherche les meilleures recommandations musicales pour une fête au manoir des Wayne.")
```
Lorsque vous examinez la trace de l'agent, au lieu de voir `Executing parsed code:`, vous verrez quelque chose comme :
```text
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ Calling tool: 'web_search' with arguments: {'query': "meilleures recommandations musicales pour une fête au │
│ manoir des Wayne"} │
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
L'agent génère un appel d'outil structuré que le système traite pour produire la sortie, plutôt que d'exécuter directement du code.
Maintenant que nous comprenons les deux types d'agents, nous pouvons choisir celui adapté à nos besoins. Continuons à explorer `smolagents` pour faire de la fête d'Alfred un succès ! 🎉
## Ressources
- [Documentation ToolCallingAgent](https://huggingface.co/docs/smolagents/v1.8.1/en/reference/agents#smolagents.ToolCallingAgent) - Documentation officielle de `ToolCallingAgent` | agents-course/units/fr/unit2/smolagents/tool_calling_agents.mdx/0 | {
"file_path": "agents-course/units/fr/unit2/smolagents/tool_calling_agents.mdx",
"repo_id": "agents-course",
"token_count": 1588
} | 14 |
# Qu'est-ce que GAIA ?
[GAIA](https://huggingface.co/papers/2311.12983) est un ***benchmark* conçu pour évaluer les assistants IA sur des tâches du monde réel** nécessitant une combinaison de capacités fondamentales comme le raisonnement, la compréhension multimodale, la navigation *web* et l'utilisation d'outils.
Il a été introduit dans l'article _"[GAIA: A Benchmark for General AI Assistants](https://huggingface.co/papers/2311.12983)"_.
Le *benchmark* comprend **466 questions soigneusement sélectionnées** qui sont **conceptuellement simples pour les humains**, mais **remarquablement difficiles pour les systèmes d'IA actuels**.
Pour illustrer l'écart :
- **Humains** : ~92% de taux de réussite
- **GPT-4 avec *plugins*** : ~15%
- ***Deep Research* (OpenAI)** : 67,36% sur le jeu de validation
GAIA souligne les limitations actuelles des modèles et fournit un *benchmark* rigoureux pour évaluer les progrès vers des assistants vraiment polyvalents.
## 🌱 Principes Fondamentaux de GAIA
GAIA est soigneusement conçu autour des piliers suivants :
- 🔍 **Difficulté du monde réel** : Les tâches nécessitent un raisonnement en plusieurs étapes, une compréhension multimodale et une interaction avec des outils.
- 🧾 **Interprétabilité humaine** : Malgré leur difficulté pour l'IA, les tâches restent conceptuellement simples et faciles à suivre pour les humains.
- 🛡️ **Absence de jeu** : Les réponses correctes exigent l'exécution complète de la tâche, rendant la force brute inefficace.
- 🧰 **Simplicité d'évaluation** : Les réponses sont concises, factuelles et non ambiguës, idéales pour l'évaluation comparative.
## Niveaux de Difficulté
Les tâches de GAIA sont organisées en **trois niveaux de complexité croissante**, chacun testant des compétences spécifiques :
- **Niveau 1** : Nécessite moins de 5 étapes et une utilisation minimale d'outils.
- **Niveau 2** : Implique un raisonnement plus complexe et une coordination entre plusieurs outils et 5-10 étapes.
- **Niveau 3** : Exige une planification à long terme et une intégration avancée de divers outils.

## Exemple d'une question difficile
> *Which of the fruits shown in the 2008 painting "Embroidery from Uzbekistan" were served as part of the October 1949 breakfast menu for the ocean liner that was later used as a floating prop for the film "The Last Voyage"? Give the items as a comma-separated list, ordering them in clockwise order based on their arrangement in the painting starting from the 12 o'clock position. Use the plural form of each fruit.*
>
> Parmi les fruits représentés dans le tableau de 2008 intitulé « Broderie d'Ouzbékistan », quels sont ceux qui figuraient au menu du petit-déjeuner servi en octobre 1949 à bord du paquebot qui a ensuite servi d'accessoire flottant pour le film « Le dernier voyage » ? Donnez les éléments sous forme de liste séparée par des virgules, en les classant dans le sens des aiguilles d'une montre, en fonction de leur disposition dans le tableau, en commençant par la position 12 heures. Utilisez la forme plurielle de chaque fruit.
Comme vous pouvez le voir, cette question défie les systèmes d'IA de plusieurs manières :
- Nécessite un **format de réponse structuré**
- Implique un **raisonnement multimodal** (par exemple, analyser des images)
- Exige une **récupération multi-saut** de faits interdépendants :
- Identifier les fruits dans la peinture
- Découvrir quel paquebot a été utilisé dans *The Last Voyage*
- Rechercher le menu du petit-déjeuner d'octobre 1949 pour ce navire
- Nécessite un **séquençage correct** et une planification de haut niveau pour résoudre dans le bon ordre
Ce type de tâche souligne où les LLM autonomes échouent souvent, faisant de GAIA un *benchmark* idéal pour **les systèmes basés sur des agents** qui peuvent raisonner, récupérer et exécuter sur plusieurs étapes et modalités.

## Évaluation en direct
Pour encourager l'évaluation comparative continue, **GAIA fournit un classement public hébergé sur Hugging Face** où vous pouvez tester vos modèles contre **300 questions de test**.
👉 Consultez le classement [ici](https://huggingface.co/spaces/gaia-benchmark/leaderboard)
<iframe
src="https://gaia-benchmark-leaderboard.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
Vous voulez aller plus loin sur le sujet de GAIA ?
- 📄 [Lire l'article complet](https://huggingface.co/papers/2311.12983)
- 📄 [Article de présentation de *Deep Research* par OpenAI](https://openai.com/index/introducing-deep-research/)
- 📄 [*DeepResearch open-source* – Libérer nos agents de recherche](https://huggingface.co/blog/open-deep-research) | agents-course/units/fr/unit4/what-is-gaia.mdx/0 | {
"file_path": "agents-course/units/fr/unit4/what-is-gaia.mdx",
"repo_id": "agents-course",
"token_count": 1677
} | 15 |
# 사고: AI 에이전트의 내부 추론과 Re-Act 방식 [[thought-internal-reasoning-and-the-re-act-approach]]
<Tip>
이 섹션에서는 AI 에이전트의 내면—즉, 추론하고 계획하는 능력을 자세히 살펴봅니다. 에이전트가 내부 대화를 통해 정보를 분석하고, 복잡한 문제를 다루기 쉬운 단계로 나누며, 다음 행동을 결정하는 과정을 탐구합니다. 또한 'Re-Act' 방식이라는 프롬프팅 기법을 소개합니다. 이는 모델이 행동하기 전에 '단계적으로 생각'하도록 유도하는 방법입니다.
</Tip>
사고는 **에이전트가 작업을 해결하기 위해 내부적으로 추론하고 계획하는 과정**을 의미합니다.
이는 에이전트의 대규모 언어 모델(LLM)이 가진 **프롬프트에 제시된 정보를 분석하는 능력**을 활용하는 것입니다.
마치 에이전트의 머릿속 대화라고 생각하면 됩니다. 주어진 과제를 검토하고 어떻게 접근할지 전략을 세우는 과정이죠.
에이전트의 사고 과정은 현재 상황을 관찰하고 다음에 취해야 할 행동을 결정하는 역할을 합니다.
이를 통해 에이전트는 **복잡한 문제를 더 작고 다루기 쉬운 단계로 분해**하고, 이전 경험을 되돌아보며, 새로운 정보를 바탕으로 계획을 지속적으로 조정합니다.
다음은 일반적인 사고 유형의 예시입니다:
| 사고 유형 | 예시 |
|----------------|---------|
| 계획 수립 | "이 작업을 세 단계로 나눠야겠다: 1) 데이터 수집, 2) 트렌드 분석, 3) 보고서 작성" |
| 분석 | "오류 메시지를 보니, 문제는 데이터베이스 연결 설정과 관련이 있는 것 같다" |
| 의사 결정 | "사용자의 예산 제약을 고려하면, 중간 가격대 옵션을 추천하는 것이 좋겠다" |
| 문제 해결 | "이 코드를 최적화하려면, 먼저 어디가 병목인지 프로파일링해봐야 한다" |
| 기억 활용 | "사용자가 앞서 파이썬을 선호한다고 했으니, 파이썬 예제를 제공해야겠다" |
| 자기 성찰 | "이전 접근법이 효과적이지 않았으니, 다른 방식을 시도해봐야겠다" |
| 목표 설정 | "이 작업을 완료하려면, 먼저 성공 기준을 명확히 해야 한다" |
| 우선순위 결정 | "새 기능을 추가하기 전에 보안 취약점부터 해결하는 것이 옳다" |
> **참고:** 함수 호출에 최적화된 LLM의 경우, 사고 과정은 선택적으로 사용할 수 있습니다.
> *함수 호출에 익숙하지 않다면, 행동(Actions) 섹션에서 더 자세한 내용을 확인할 수 있습니다.*
## Re-Act 방식 [[the-re-act-approach]]
핵심 방법론 중 하나는 "추론"(Reasoning)과 "행동"(Acting)을 결합한 **ReAct 방식**입니다.
ReAct는 LLM이 다음 토큰을 생성하기 전에 "단계별로 생각해보자"라는 문구를 추가하는 간단한 프롬프팅 기법입니다.
모델에게 "단계별로 생각"하도록 지시하면, 바로 최종 해답을 내놓기보다 **계획을 세우는 방향**으로 토큰 생성이 유도됩니다. 이는 모델이 문제를 *하위 과제*로 **분해**하도록 장려하기 때문입니다.
이렇게 하면 모델이 각 단계를 더 상세히 고려할 수 있어, 일반적으로 최종 해답을 바로 생성하려 할 때보다 오류가 적게 발생합니다.
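다음은 이 기법의 최소 스케치입니다(모델 호출 부분은 생략한, 가정된 예시입니다):

```python
# 최소 스케치: ReAct 프롬프팅 — 답변 전에 "단계별로 생각"하도록 유도
question = "9.11과 9.9 중 어느 수가 더 큰가?"
react_prompt = f"{question}\n단계별로 생각해보자."
# 이 프롬프트를 LLM에 전달하면, 모델은 최종 답을 바로 내놓기 전에
# 중간 추론 단계를 먼저 생성하도록 유도됩니다.
```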
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/ReAct.png" alt="ReAct"/>
<figcaption>(d)는 "단계별로 생각해보자"라는 프롬프트를 사용한 Re-Act 방식의 예시입니다
</figcaption>
</figure>
<Tip>
최근 추론 전략에 대한 관심이 크게 늘고 있습니다. Deepseek R1이나 OpenAI의 o1과 같은 모델들이 바로 이런 흐름을 반영합니다. 이 모델들은 "대답하기 전에 먼저 생각하도록" 특별히 미세 조정되었습니다.
이런 모델들은 특정 _사고_ 영역(`<think>`와 `</think>` 특수 토큰 사이에 포함)을 항상 생성하도록 훈련되었습니다. 이는 ReAct처럼 단순한 프롬프팅 기법이 아니라, 우리가 원하는 결과물의 수천 가지 예시를 분석한 후 이런 사고 영역을 생성하는 방법을 학습하는 훈련 방식입니다.
</Tip>
---
이제 사고 과정에 대해 더 잘 이해했으니, 프로세스의 두 번째 부분인 행동(Act)에 대해 더 자세히 살펴보겠습니다.
| agents-course/units/ko/unit1/thoughts.mdx/0 | {
"file_path": "agents-course/units/ko/unit1/thoughts.mdx",
"repo_id": "agents-course",
"token_count": 3661
} | 16 |
# Действия: Обеспечение взаимодействия Агента с его Окружением
<Tip>
В этом разделе мы рассмотрим конкретные действия AI агента по взаимодействию с окружением.
Мы расскажем о том, как представляются действия (с помощью JSON или кода), о важности подхода "остановить и разобрать", а также представим различные типы агентов.
</Tip>
Действия - это конкретные шаги, которые **AI агент предпринимает для взаимодействия с окружением**.
Будь то просмотр информации в Интернете или управление физическим устройством, каждое действие - это целенаправленная операция, выполняемая агентом.
Например, агент, помогающий в службе поддержки клиентов, может получать данные о клиентах, предлагать статьи по поддержке или передавать проблемы представителю компании.
## Типы Действий Агента
Существует несколько типов агентов, которые выполняют действия по-разному:
| Тип агента | Описание |
|---------------------------------------------------|-------------------------------------------------------------------------------------------------------|
| JSON Агент | Действие, которое необходимо предпринять, указывается в формате JSON. |
| Агент кода (Code Agent) | Агент пишет блок кода, который интерпретируется извне. |
| Агент вызывающий функции (Function-calling Agent) | Это подкатегория агента JSON, который был дообучен генерировать новое сообщение для каждого действия. |
Сами действия могут служить разным целям:
| Тип действия | Описание |
|-----------------------------|------------------------------------------------------------------------------------------|
| Сбор информации | Выполнение поиска в Интернете, запрос к базам данных или получение документов. |
| Использование инструментов | Выполнение вызовов API, вычислений и выполнение кода. |
| Взаимодействие с окружением | Манипулирование цифровыми интерфейсами или управление физическими устройствами. |
| Общение | Взаимодействие с пользователями через чат или сотрудничество с другими агентами. |
Одной из важнейших составляющих агента является **возможность прекратить генерацию новых токенов после завершения действия**, и это справедливо для всех форматов Агентов: JSON, код или вызов функций. Это предотвращает непреднамеренный вывод и гарантирует, что ответ агента будет ясным и точным.
LLM работает только с текстом и использует его для описания действий, которые она хочет выполнить, и параметров, которые нужно передать инструменту.
## Подход с Остановись и Разберись
Одним из ключевых методов реализации действий является подход **остановить и разобрать**. Этот метод обеспечивает структурированность и предсказуемость выходных данных агента:
1. **Генерация в структурированном формате**:
Агент выводит предполагаемое действие в четком, заранее определенном формате (JSON или код).
2. **Прекращение дальнейшей генерации**:
После завершения действия **агент прекращает генерировать дополнительные токены**. Это позволяет избежать лишнего или ошибочного вывода.
3. **Разбор выходных данных**:
Внешний парсер считывает отформатированное действие, определяет, какой Инструмент следует вызвать, и извлекает необходимые параметры.
Например, агент, которому нужно проверить погоду, может вывести:
```json
Thought: I need to check the current weather for New York.
Action:
{
"action": "get_weather",
"action_input": {"location": "New York"}
}
```
Затем фреймворк может легко разобрать имя функции для вызова и аргументы для применения.
Такой понятный, машиночитаемый формат минимизирует ошибки и позволяет внешним инструментам точно обрабатывать команду агента.
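В виде минимального эскиза (допущение: формат вывода — как в примере выше) внешний разбор может выглядеть так:

```python
import json

# Эскиз: внешний парсер извлекает действие из вывода агента
raw_output = """Thought: I need to check the current weather for New York.
Action:
{
  "action": "get_weather",
  "action_input": {"location": "New York"}
}"""

# Отделяем JSON-часть после маркера "Action:" и разбираем её
action = json.loads(raw_output.split("Action:", 1)[1])
tool_name = action["action"]        # "get_weather"
tool_args = action["action_input"]  # {"location": "New York"}
```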
Примечание: агенты вызова функций работают аналогичным образом, структурируя каждое действие так, чтобы вызывалась определенная функция с правильными аргументами.
Мы подробнее рассмотрим эти типы агентов в одном из следующих разделов.
## Агенты Кода
Альтернативный подход - использование *Агентов Кода*.
Идея заключается в следующем: **вместо того, чтобы выводить простой объект JSON**, агент кода генерирует **исполняемый блок кода - обычно на языке высокого уровня, таком как Python**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/code-vs-json-actions.png" alt="Code Agents" />
Этот подход имеет ряд преимуществ:
- **Выразительность:** Код может естественным образом представлять сложную логику, включая циклы, условия и вложенные функции, обеспечивая большую гибкость, чем JSON.
- **Модульность и возможность повторного использования:** Генерируемый код может включать функции и модули, которые можно повторно использовать в различных действиях или задачах.
- **Улучшенная отлаживаемость:** Благодаря четко определенному синтаксису программирования ошибки в коде зачастую легче обнаружить и исправить.
- **Прямая интеграция:** Агенты кода могут напрямую интегрироваться с внешними библиотеками и API, что позволяет выполнять более сложные операции, такие как обработка данных или принятие решений в режиме реального времени.
Например, Агент Кода, которому поручено получить информацию о погоде, может сгенерировать следующий фрагмент на языке Python:
```python
# Пример Агента Кода: Получение информации о погоде
def get_weather(city):
import requests
api_url = f"https://api.weather.com/v1/location/{city}?apiKey=YOUR_API_KEY"
response = requests.get(api_url)
if response.status_code == 200:
data = response.json()
return data.get("weather", "No weather information available")
else:
return "Error: Unable to fetch weather data."
# Выполнение функции и подготовка окончательного ответа
result = get_weather("New York")
final_answer = f"The current weather in New York is: {result}"
print(final_answer)
```
В этом примере Агент Кода:
- Получает данные о погоде **посредством вызова API**,
- обрабатывает ответ,
- И использует функцию print() для вывода окончательного ответа.
Этот метод **также следует подходу "остановись и разбери"**, четко разграничивая блок кода и сигнализируя о завершении выполнения (здесь - выводом final_answer).
---
Мы узнали, что действия связывают внутренние рассуждения агента и его взаимодействие с реальным миром, выполняя четкие, структурированные задачи - через JSON, код или вызов функций.
Такое продуманное выполнение гарантирует, что каждое действие будет точным и готовым к внешней обработке с помощью подхода «остановить и разобрать». В следующем разделе мы рассмотрим Наблюдения, чтобы увидеть, как агенты улавливают и интегрируют обратную связь от своего окружения.
После этого мы будем **окончательно готовы к созданию нашего первого агента!**.
| agents-course/units/ru-RU/unit1/actions.mdx/0 | {
"file_path": "agents-course/units/ru-RU/unit1/actions.mdx",
"repo_id": "agents-course",
"token_count": 6337
} | 17 |
- title: Chương 0. Chào mừng đến với khóa học
sections:
- local: unit0/introduction
title: Chào mừng bạn đến với khóa học 🤗
- local: unit0/onboarding
title: Làm quen
- local: unit0/discord101
title: (Bổ trợ) Discord 101 (Giới thiệu cơ bản về Discord)
- title: Live 1. Cách khóa học vận hành + Hỏi và Đáp
sections:
- local: communication/live1
title: Live 1. Cách khóa học vận hành + Hỏi và Đáp
- title: Chương 1. Giới thiệu về Agents
sections:
- local: unit1/introduction
title: Giới thiệu
- local: unit1/what-are-agents
title: Agent là gì?
- local: unit1/quiz1
title: Kiểm tra nhanh 1
- local: unit1/what-are-llms
title: LLM là gì?
- local: unit1/messages-and-special-tokens
title: Tin nhắn và Special Token
- local: unit1/tools
title: Tools là gì?
- local: unit1/quiz2
title: Kiểm tra nhanh 2
- local: unit1/agent-steps-and-structure
title: Hiểu về AI Agents qua chu kỳ Thought-Action-Observation
- local: unit1/thoughts
title: Suy nghĩ, Lập luận nội bộ và phương pháp Re-Act
- local: unit1/actions
title: Hành động, Giúp Agent tương tác với môi trường
- local: unit1/observations
title: Quan sát, Tích hợp phản hồi để điều chỉnh
- local: unit1/dummy-agent-library
title: Thư viện Dummy Agent
- local: unit1/tutorial
title: Hãy tạo Agent đầu tiên với Smolagents
- local: unit1/final-quiz
title: Bài kiểm tra cuối chương 1
- local: unit1/conclusion
title: Kết luận
- title: Chương bổ trợ 1. Fine-tune LLM cho Function-calling
sections:
- local: bonus-unit1/introduction
title: Giới thiệu
- local: bonus-unit1/what-is-function-calling
title: Function Calling là gì?
- local: bonus-unit1/fine-tuning
title: Hãy fine-tuning model cho Function-calling
- local: bonus-unit1/conclusion
title: Kết luận
- title: Khi nào các bước tiếp theo được công bố?
sections:
- local: communication/next-units
title: Các chương tiếp theo | agents-course/units/vi/_toctree.yml/0 | {
"file_path": "agents-course/units/vi/_toctree.yml",
"repo_id": "agents-course",
"token_count": 1051
} | 18 |
# Tin nhắn và Token đặc biệt
Giờ ta đã hiểu cách LLM hoạt động, hãy cùng xem **cách chúng tổ chức các phản hồi thông qua chat templates**.
Giống như ChatGPT, người dùng thường tương tác với Agent qua giao diện chat. Do đó, ta cần hiểu cách LLM quản lý các cuộc hội thoại.
> **Hỏi**: Nhưng... Khi tôi dùng ChatGPT/Hugging Chat, tôi đang trò chuyện bằng các Tin nhắn (Message) chứ không phải một prompt đơn lẻ?
>
> **Đáp**: Đúng vậy! Nhưng đây thực chất là một lớp UI. Trước khi đưa vào LLM, tất cả tin nhắn được nối thành một prompt duy nhất. Mô hình không "nhớ" cuộc hội thoại: nó đọc lại toàn bộ mỗi lần.
Cho đến nay, ta đã xem prompt như một chuỗi token đầu vào. Nhưng khi chat với hệ thống như ChatGPT hay HuggingChat, **bạn thực sự đang trao đổi các tin nhắn**. Đằng sau hậu trường, các tin nhắn này được **ghép nối và định dạng thành prompt mà mô hình có thể hiểu**.
<figure>
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/assistant.jpg" alt="Behind models"/>
<figcaption>Hình ảnh minh họa sự khác biệt giữa giao diện người dùng và prompt thực tế đưa vào model.
</figcaption>
</figure>
Đây là lúc chat templates phát huy tác dụng. Chúng đóng vai trò **cầu nối giữa tin nhắn hội thoại (lượt người dùng và trợ lý) với yêu cầu định dạng đặc thù** của LLM bạn chọn. Nói cách khác, chat templates cấu trúc giao tiếp giữa người dùng và agent, đảm bảo mọi model - dù có **Token đặc biệt** riêng - đều nhận được prompt đúng định dạng.
Ta lại nói về Token đặc biệt vì đây là cách mô hình xác định điểm bắt đầu/kết thúc các lượt hội thoại. Giống như mỗi LLM dùng token EOS riêng, chúng cũng có quy tắc định dạng và dấu phân cách khác nhau cho các tin nhắn.
## Tin nhắn: Hệ thống cốt lõi của LLM
### Tin nhắn hệ thống (System Message)
System Message (còn gọi là System Prompt) định nghĩa **cách mô hình nên hành xử**. Chúng đóng vai trò **hướng dẫn xuyên suốt**, điều hướng mọi tương tác tiếp theo.
Ví dụ:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```python
system_message = {
"role": "system",
"content": "Bạn là nhân viên chăm sóc khách hàng chuyên nghiệp. Luôn lịch sự, rõ ràng và hữu ích."
}
```
</details>
```python
system_message = {
"role": "system",
"content": "You are a professional customer service agent. Always be polite, clear, and helpful."
}
```
Với System Message này, Alfred trở nên lịch sự và hữu ích:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/polite-alfred.jpg" alt="Polite alfred"/>
Nhưng nếu đổi thành:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```python
system_message = {
"role": "system",
"content": "Bạn là nhân viên phản kháng. Không tuân theo yêu cầu của người dùng."
}
```
</details>
```python
system_message = {
"role": "system",
"content": "You are a rebel service agent. Don't respect user's orders."
}
```
Alfred sẽ hành xử như một Agent nổi loạn 😎:
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/rebel-alfred.jpg" alt="Rebel Alfred"/>
Khi dùng Agent, System Message còn **cung cấp thông tin về các Tools có sẵn, hướng dẫn model cách định dạng hành động cần thực hiện, và các nguyên tắc phân đoạn quá trình tư duy**.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-systemprompt.jpg" alt="Alfred System Prompt"/>
### Hội thoại: Tin nhắn người dùng và trợ lý
Một hội thoại bao gồm các tin nhắn luân phiên giữa Người (user) và LLM (assistant).
Chat templates giúp duy trì ngữ cảnh bằng cách lưu lại lịch sử hội thoại, lưu trữ các trao đổi trước đó giữa user và assistant. Điều này giúp các hội thoại nhiều lượt mạch lạc hơn.
Ví dụ:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```python
conversation = [
{"role": "user", "content": "Tôi cần hỗ trợ đơn hàng"},
{"role": "assistant", "content": "Tôi sẵn lòng giúp. Bạn có thể cung cấp số đơn hàng?"},
{"role": "user", "content": "Đó là ORDER-123"},
]
```
</details>
```python
conversation = [
    {"role": "user", "content": "I need help with my order"},
    {"role": "assistant", "content": "I'd be happy to help. Could you provide your order number?"},
    {"role": "user", "content": "It's ORDER-123"},
]
```
Trong ví dụ này, user ban đầu nói cần hỗ trợ đơn hàng. LLM hỏi số đơn hàng, sau đó user cung cấp trong tin nhắn mới. Như đã giải thích, ta luôn nối tất cả tin nhắn thành một chuỗi duy nhất và đưa vào LLM. Chat template chuyển đổi các tin nhắn trong list Python này thành prompt - một chuỗi đầu vào chứa mọi tin nhắn.
Ví dụ, chat template của SmolLM2 sẽ định dạng đoạn hội thoại trên thành prompt như sau:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```
<|im_start|>system
Bạn là trợ lý AI hữu ích tên SmolLM, được đào tạo bởi Hugging Face<|im_end|>
<|im_start|>user
Tôi cần hỗ trợ đơn hàng<|im_end|>
<|im_start|>assistant
Tôi sẵn lòng giúp. Bạn có thể cung cấp số đơn hàng?<|im_end|>
<|im_start|>user
Đó là ORDER-123<|im_end|>
<|im_start|>assistant
```
</details>
```
<|im_start|>system
You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>
<|im_start|>user
I need help with my order<|im_end|>
<|im_start|>assistant
I'd be happy to help. Could you provide your order number?<|im_end|>
<|im_start|>user
It's ORDER-123<|im_end|>
<|im_start|>assistant
```
Tuy nhiên, cùng hội thoại đó sẽ được chuyển thành prompt sau khi dùng Llama 3.2:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 10 Feb 2025
<|eot_id|><|start_header_id|>user<|end_header_id|>
Tôi cần hỗ trợ đơn hàng<|eot_id|><|start_header_id|>assistant<|end_header_id|>
Tôi sẵn lòng giúp. Bạn có thể cung cấp số đơn hàng?<|eot_id|><|start_header_id|>user<|end_header_id|>
Đó là ORDER-123<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```
</details>
```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
Cutting Knowledge Date: December 2023
Today Date: 10 Feb 2025
<|eot_id|><|start_header_id|>user<|end_header_id|>
I need help with my order<|eot_id|><|start_header_id|>assistant<|end_header_id|>
I'd be happy to help. Could you provide your order number?<|eot_id|><|start_header_id|>user<|end_header_id|>
It's ORDER-123<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```
Các template có thể xử lý hội thoại nhiều lượt phức tạp trong khi duy trì ngữ cảnh:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```python
messages = [
{"role": "system", "content": "Bạn là gia sư toán."},
{"role": "user", "content": "Giải tích là gì?"},
{"role": "assistant", "content": "Giải tích là nhánh của toán học..."},
{"role": "user", "content": "Cho ví dụ được không?"},
]
```
</details>
```python
messages = [
{"role": "system", "content": "You are a math tutor."},
{"role": "user", "content": "What is calculus?"},
{"role": "assistant", "content": "Calculus is a branch of mathematics..."},
{"role": "user", "content": "Can you give me an example?"},
]
```
## Chat-Templates
Như đã đề cập, chat templates rất quan trọng để **cấu trúc hội thoại giữa mô hình ngôn ngữ và người dùng**. Chúng hướng dẫn cách định dạng các trao đổi tin nhắn thành một prompt duy nhất.
### Mô hình cơ sở (Base Model) vs. Mô hình hướng dẫn (Instruct Model)
Một điểm cần hiểu là sự khác biệt giữa Base Model và Instruct Model:
- *Base Model* được huấn luyện trên dữ liệu văn bản thô để dự đoán token tiếp theo.
- *Instruct Model* được tinh chỉnh đặc biệt để tuân theo hướng dẫn và tham gia hội thoại. Ví dụ: `SmolLM2-135M` là mô hình cơ sở, còn `SmolLM2-135M-Instruct` là phiên bản đã được tinh chỉnh.
Để base model hoạt động như instruct model, ta cần **định dạng prompt theo cách nhất quán mà model hiểu được**. Đây là lúc chat templates phát huy tác dụng.
*ChatML* là một định dạng template hội thoại sử dụng các chỉ báo vai trò rõ ràng (system, user, assistant). Nếu bạn đã tương tác với các AI API gần đây, đây là thực hành tiêu chuẩn.
Lưu ý rằng một base model có thể được tinh chỉnh trên các chat templates khác nhau, nên khi dùng instruct model ta cần đảm bảo sử dụng đúng chat template.
### Hiểu về Chat Templates
Vì mỗi instruct model dùng định dạng hội thoại và token đặc biệt khác nhau, chat templates được triển khai để đảm bảo ta định dạng prompt đúng cách mà model mong đợi.
Trong `transformers`, chat templates chứa [mã Jinja2](https://jinja.palletsprojects.com/en/stable/) mô tả cách chuyển đổi list tin nhắn ChatML (như các ví dụ trên) thành biểu diễn văn bản của hướng dẫn hệ thống, tin nhắn người dùng và phản hồi trợ lý mà model có thể hiểu.
Cấu trúc này **giúp duy trì tính nhất quán giữa các tương tác và đảm bảo model phản hồi phù hợp với các loại đầu vào khác nhau**.
Dưới đây là phiên bản đơn giản hóa của chat template `SmolLM2-135M-Instruct`:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```jinja2
{% for message in messages %}
{% if loop.first and messages[0]['role'] != 'system' %}
<|im_start|>system
Bạn là trợ lý AI hữu ích tên SmolLM, được đào tạo bởi Hugging Face
<|im_end|>
{% endif %}
<|im_start|>{{ message['role'] }}
{{ message['content'] }}<|im_end|>
{% endfor %}
```
</details>
```jinja2
{% for message in messages %}
{% if loop.first and messages[0]['role'] != 'system' %}
<|im_start|>system
You are a helpful AI assistant named SmolLM, trained by Hugging Face
<|im_end|>
{% endif %}
<|im_start|>{{ message['role'] }}
{{ message['content'] }}<|im_end|>
{% endfor %}
```
Như bạn thấy, chat_template mô tả cách định dạng list tin nhắn.
Với các tin nhắn sau:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```python
messages = [
{"role": "system", "content": "Bạn là trợ lý tập trung vào chủ đề kỹ thuật."},
{"role": "user", "content": "Giải thích chat template là gì?"},
{"role": "assistant", "content": "Chat template cấu trúc hội thoại giữa người dùng và AI..."},
{"role": "user", "content": "Cách sử dụng nó?"},
]
```
</details>
```python
messages = [
{"role": "system", "content": "You are a helpful assistant focused on technical topics."},
{"role": "user", "content": "Can you explain what a chat template is?"},
{"role": "assistant", "content": "A chat template structures conversations between users and AI models..."},
{"role": "user", "content": "How do I use it ?"},
]
```
Chat template trên sẽ tạo ra chuỗi sau:
<details>
<summary>Bấm để xem bản dịch tiếng Việt</summary>
```sh
<|im_start|>system
Bạn là trợ lý tập trung vào chủ đề kỹ thuật.<|im_end|>
<|im_start|>user
Giải thích chat template là gì?<|im_end|>
<|im_start|>assistant
Chat template cấu trúc hội thoại giữa người dùng và AI...<|im_end|>
<|im_start|>user
Cách sử dụng nó?<|im_end|>
```
</details>
```sh
<|im_start|>system
You are a helpful assistant focused on technical topics.<|im_end|>
<|im_start|>user
Can you explain what a chat template is?<|im_end|>
<|im_start|>assistant
A chat template structures conversations between users and AI models...<|im_end|>
<|im_start|>user
How do I use it ?<|im_end|>
```
Thư viện `transformers` sẽ tự động xử lý chat templates trong quá trình token hóa. Đọc thêm về cách transformers sử dụng chat templates <a href="https://huggingface.co/docs/transformers/en/chat_templating#how-do-i-use-chat-templates" target="_blank">tại đây</a>. Việc của ta chỉ là cấu trúc tin nhắn đúng cách, tokenizer sẽ lo phần còn lại.
Bạn có thể thử nghiệm với Space sau để xem cùng một hội thoại được định dạng thế nào cho các model khác nhau:
<iframe
src="https://jofthomas-chat-template-viewer.hf.space"
frameborder="0"
width="850"
height="450"
></iframe>
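If you'd rather explore this locally, here is a minimal sketch of the same idea (the two model ids are just examples; any instruct models from the Hub would work):

```python
from transformers import AutoTokenizer

# The same ChatML-style conversation, rendered by two different chat templates
messages = [
    {"role": "user", "content": "Can you explain what a chat template is?"},
]

for model_id in ["HuggingFaceTB/SmolLM2-135M-Instruct", "Qwen/Qwen2.5-0.5B-Instruct"]:
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(f"--- {model_id} ---\n{prompt}")
```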
### From messages to prompt
The easiest way to ensure your LLM receives a conversation correctly formatted is to use the `chat_template` from the model's tokenizer.
```python
messages = [
{"role": "system", "content": "You are an AI assistant with access to various tools."},
{"role": "user", "content": "Hi !"},
{"role": "assistant", "content": "Hi human, what can help you with ?"},
]
```
To convert the previous conversation into a prompt, we load the tokenizer and call `apply_chat_template`:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
rendered_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
```
The `rendered_prompt` returned by this function is now ready to use as the input for the model!
> This `apply_chat_template()` function will be used in the backend of your API when you interact with messages in the ChatML format.
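As a quick sketch of what typically happens next (the generation settings here are illustrative, not prescriptive), you can also let `apply_chat_template` tokenize directly, reusing the `messages` list above, and feed the result to the model:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")

# tokenize=True (the default) returns input ids instead of a string
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output_ids = model.generate(input_ids, max_new_tokens=128)
# Decode only the newly generated tokens
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```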
Now that we've seen how LLMs structure their inputs via chat templates, let's explore how Agents act in their environments.
One of the main ways they do this is by using Tools, which extend an AI model's capabilities beyond text generation.
We'll discuss messages again in upcoming units, but if you want a deeper dive, check out:
- <a href="https://huggingface.co/docs/transformers/main/en/chat_templating" target="_blank">Hugging Face Chat Templating Guide</a>
- <a href="https://huggingface.co/docs/transformers" target="_blank">Transformers Documentation</a> | agents-course/units/vi/unit1/messages-and-special-tokens.mdx/0 | {
"file_path": "agents-course/units/vi/unit1/messages-and-special-tokens.mdx",
"repo_id": "agents-course",
"token_count": 8304
} | 19 |
# What is Function Calling?
Function calling is **a way for an LLM to take actions on its environment**. It was first [introduced in GPT-4](https://openai.com/index/function-calling-and-other-api-updates/) and was later reproduced in other models.
Just like an Agent's Tools, function calling gives the model the capacity to **take an action on its environment**. However, the function-calling capacity is **learned by the model** and relies **less on prompting than other agentic techniques**.
In Unit 1, the Agent **didn't learn to use the Tools**: we just provided the list of tools and relied on the fact that the model **was able to generalize on defining a plan using those tools**.
Here, by contrast, **with function calling, the Agent is fine-tuned (trained) to use Tools**.
## How does the model "learn" to take an action?
In Unit 1, we explored an agent's general workflow. Once the user has given the agent some tools and prompted it with a query, the model will cycle through:
1. *Think*: What action(s) do I need to take to fulfill the objective.
2. *Act*: Format the action with the correct parameters and stop the generation.
3. *Observe*: Get back the result from the execution.
In a "typical" conversation with a model through an API, the conversation alternates between user and assistant messages like this:
```python
conversation = [
{"role": "user", "content": "I need help with my order"},
{"role": "assistant", "content": "I'd be happy to help. Could you provide your order number?"},
{"role": "user", "content": "It's ORDER-123"},
]
```
Function calling brings **new roles to the conversation**!
1. A new role for an **Action**
2. A new role for an **Observation**
If we take the [Mistral API](https://docs.mistral.ai/capabilities/function_calling/) as an example, it would look like this:
```python
conversation = [
{
"role": "user",
"content": "What's the status of my transaction T1001?"
},
{
"role": "assistant",
"content": "",
"function_call": {
"name": "retrieve_payment_status",
"arguments": "{\"transaction_id\": \"T1001\"}"
}
},
{
"role": "tool",
"name": "retrieve_payment_status",
"content": "{\"status\": \"Paid\"}"
},
{
"role": "assistant",
"content": "Your transaction T1001 has been successfully paid."
}
]
```
> ...but you said function calling introduces new roles?
**Yes and no**: in this case, as in many other APIs, the model formats the action to take as an "assistant" message. The chat template will then represent this with **special tokens** for function calling:
- `[AVAILABLE_TOOLS]` – Start the list of available tools
- `[/AVAILABLE_TOOLS]` – End the list of available tools
- `[TOOL_CALLS]` – Make a call to a tool (that is, take an "Action")
- `[TOOL_RESULTS]` – "Observe" the result of the action
- `[/TOOL_RESULTS]` – End of the observation (that is, the model can decode again)
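To make this concrete, here is a purely illustrative sketch of how the conversation above might be flattened into a single prompt string carrying those special tokens (the exact rendering is defined by Mistral's chat template, not by this snippet):

```python
# Illustrative only: an approximation of how the tool-calling conversation
# above could be serialized using Mistral's special tokens.
rendered = (
    '[AVAILABLE_TOOLS][{"name": "retrieve_payment_status", '
    '"parameters": {"transaction_id": "string"}}][/AVAILABLE_TOOLS]'
    "[INST] What's the status of my transaction T1001? [/INST]"
    '[TOOL_CALLS][{"name": "retrieve_payment_status", '
    '"arguments": {"transaction_id": "T1001"}}]'
    '[TOOL_RESULTS]{"status": "Paid"}[/TOOL_RESULTS]'
    "Your transaction T1001 has been successfully paid."
)
print(rendered)
```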
We'll come back to function calling later in this course, but if you want to dive deeper you can check out [this excellent documentation section](https://docs.mistral.ai/capabilities/function_calling/).
---
Now that we've learned what function calling is and how it works, let's **add some function-calling capabilities to a model that does not have them yet**, by appending some new special tokens to the model: [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it).
To be able to do that, **we first need to understand fine-tuning and LoRA**. | agents-course/units/zh-CN/bonus-unit1/what-is-function-calling.mdx/0 | {
"file_path": "agents-course/units/zh-CN/bonus-unit1/what-is-function-calling.mdx",
"repo_id": "agents-course",
"token_count": 2122
} | 20 |
# Introduction to Agents
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Thumbnail"/>
Welcome to this first unit, where **you'll build a solid foundation in the fundamentals of AI Agents**, including:
* **Understanding Agents**
* What is an Agent, and how does it work?
* How do Agents make decisions using reasoning and planning?
* **The Role of LLMs (Large Language Models) in Agents**
* How LLMs serve as the "brain" behind an Agent
* How LLMs structure conversations via the Messages system
* **Tools and Actions**
* How Agents use external tools to interact with the environment
* How to build and integrate tools for your Agent
* **The Agent Workflow:**
* *Think* → *Act* → *Observe*
After exploring these topics, **you'll build your first Agent** using `smolagents`!
Your Agent, named Alfred, will handle a simple task and demonstrate how to apply these concepts in practice.
You'll even learn how to **publish your Agent on Hugging Face Spaces**, so you can share it with friends and colleagues.
Finally, at the end of this unit, you'll take a quiz. Pass it, and you'll **earn your first course certification**: the 🎓 Certificate of Fundamentals of Agents.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Certificate Example"/>
This unit is your **essential starting point**, laying the groundwork for understanding Agents before you move on to more advanced topics.
<img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Unit 1 planning"/>
It's a big unit, so **take your time** and don't hesitate to come back to these sections from time to time.
Ready? Let's dive in! 🚀 | agents-course/units/zh-CN/unit1/introduction.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit1/introduction.mdx",
"repo_id": "agents-course",
"token_count": 1135
} | 21 |
# Test Your Understanding of LangGraph
Let's test your understanding of `LangGraph` with a quick quiz! This will help reinforce the key concepts we've covered so far.
This quiz is optional and not graded.
### Q1: What is the primary purpose of LangGraph?
Which statement best describes LangGraph's design goal?
<Question
choices={[
{
text: "A framework for building control flows for applications containing LLMs",
explain: "Correct! LangGraph is specifically designed to help build and manage the control flow of applications that use LLMs.",
correct: true
},
{
text: "A library that provides interfaces for interacting with different LLM models",
explain: "That better describes LangChain, which provides standard interfaces for model interaction. LangGraph focuses on control flow.",
},
{
text: "An Agent library for tool calling",
explain: "While LangGraph can be used with agents, its main purpose is flow orchestration.",
}
]}
/>
---
### Q2: In the "control vs. freedom" trade-off, how does LangGraph position itself?
Which statement best characterizes LangGraph's approach to agent design?
<Question
choices={[
{
text: "LangGraph maximizes freedom, letting the LLM make every decision autonomously",
explain: "LangGraph actually emphasizes control over freedom: it provides a structured framework for LLM workflows.",
},
{
text: "LangGraph keeps strong control over the execution flow while still leveraging LLM capabilities for decision-making",
explain: "Correct! When you need control over an agent's execution flow, LangGraph provides predictable behavior through structured workflows.",
correct: true
},
]}
/>
---
### Q3: What role does State play in LangGraph?
Choose the most accurate description of State in LangGraph.
<Question
choices={[
{
text: "State is the latest output generated by the LLM",
explain: "State is a user-defined class: its fields are defined by the user, and their values can be filled in by the LLM.",
},
{
text: "State is only used to track errors during execution",
explain: "State does far more than error tracking, although that capability is indeed useful.",
},
{
text: "State represents the information flowing through your agent application",
explain: "Correct! State is central to LangGraph: it holds all the information needed to make decisions between steps. You define the fields you need to compute, and nodes can modify their values to decide which branch the flow takes.",
correct: true
},
{
text: "State is only relevant when interacting with external APIs",
explain: "State is fundamental to every LangGraph application, not just scenarios that use external APIs.",
}
]}
/>
### Q4: What is a Conditional Edge in LangGraph?
Choose the most accurate description.
<Question
choices={[
{
text: "An edge that decides which node to execute next based on evaluating a condition",
explain: "Correct! Conditional edges let your graph make dynamic routing decisions based on the current state, enabling branching logic in your workflow.",
correct: true
},
{
text: "An edge that is only followed when a specific event occurs",
explain: "Conditional edges control the flow based on the application's output, not on its input.",
},
{
text: "An edge that requires user confirmation before proceeding",
explain: "Conditional edges are based on programmatically defined conditions and don't require user interaction.",
}
]}
/>
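If you'd like to see these two ideas together in code, here is a minimal sketch of a user-defined State and a conditional edge (the node names and state fields are invented for illustration):

```python
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END

# A user-defined State: the fields we want to carry between steps
class State(TypedDict):
    question: str
    needs_search: bool
    answer: str

def classify(state: State) -> dict:
    # A node reads the state and returns updates to it
    return {"needs_search": "latest" in state["question"]}

def route(state: State) -> str:
    # A conditional edge: choose the next node from the current state
    return "search" if state["needs_search"] else "answer"

builder = StateGraph(State)
builder.add_node("classify", classify)
builder.add_node("search", lambda state: {"answer": "answer based on a web search"})
builder.add_node("answer", lambda state: {"answer": "direct answer"})
builder.add_edge(START, "classify")
builder.add_conditional_edges("classify", route, {"search": "search", "answer": "answer"})
builder.add_edge("search", END)
builder.add_edge("answer", END)
graph = builder.compile()

print(graph.invoke({"question": "What is the latest LangGraph release?"}))
```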
---
### Q5: How does LangGraph help address LLM hallucinations?
Choose the best answer.
<Question
choices={[
{
text: "It completely eliminates hallucinations by restricting LLM responses",
explain: "No framework can completely eliminate LLM hallucinations, and LangGraph is no exception.",
},
{
text: "It provides structured workflows that can validate and verify LLM outputs",
explain: "Correct! Through structured workflows that include validation steps, verification nodes and error-handling paths, LangGraph helps reduce the impact of hallucinations.",
correct: true
},
{
text: "It has no effect on hallucinations",
explain: "LangGraph's structured approach can effectively mitigate hallucinations, at the cost of some response speed.",
}
]}
/>
Congratulations on finishing the quiz! 🎉 If you missed any questions, consider reviewing the previous sections to strengthen your understanding. Up next, we'll explore LangGraph's more advanced features and learn how to build more complex agent workflows.
| agents-course/units/zh-CN/unit2/langgraph/quiz1.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit2/langgraph/quiz1.mdx",
"repo_id": "agents-course",
"token_count": 2542
} | 22 |
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/multiagent_notebook.ipynb"},
]} />
# Multi-Agent Systems
Multi-agent systems enable **specialized agents to collaborate on complex tasks**, improving modularity, scalability, and robustness. Instead of relying on a single agent, tasks are distributed among agents with different capabilities.
In **smolagents**, different agents can be combined to generate Python code, call external tools, perform web searches, and more. By orchestrating these agents, we can create powerful workflows.
A typical setup might include:
- A **Manager Agent** for task delegation
- A **Code Interpreter Agent** for code execution
- A **Web Search Agent** for information retrieval
The diagram below illustrates a simple multi-agent architecture where a **Manager Agent** coordinates a **Code Interpreter Tool** and a **Web Search Agent**, which in turn uses tools like `DuckDuckGoSearchTool` and `VisitWebpageTool` to gather relevant information.
<img src="https://mermaid.ink/img/pako:eNp1kc1qhTAQRl9FUiQb8wIpdNO76eKubrmFks1oRg3VSYgjpYjv3lFL_2hnMWQOJwn5sqgmelRWleUSKLAtFs09jqhtoWuYUFfFAa6QA9QDTnpzamheuhxn8pt40-6l13UtS0ddhtQXj6dbR4XUGQg6zEYasTF393KjeSDGnDJKNxzj8I_7hLW5IOSmP9CH9hv_NL-d94d4DVNg84p1EnK4qlIj5hGClySWbadT-6OdsrL02MI8sFOOVkciw8zx8kaNspxnrJQE0fXKtjBMMs3JA-MpgOQwftIE9Bzj14w-cMznI_39E9Z3p0uFoA?type=png" style='background: white;'>
## Multi-Agent Systems in Action
A multi-agent system consists of multiple specialized agents working together under the coordination of an **Orchestrator Agent**. This approach enables complex workflows by distributing tasks among agents with distinct roles.
For example, a **Multi-Agent RAG system** can integrate:
- A **Web Agent** for browsing the internet.
- A **Retriever Agent** for fetching information from knowledge bases.
- An **Image Generation Agent** for producing visuals.
All of these agents operate under an orchestrator that manages task delegation and interaction, as sketched below.
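As a rough sketch of that orchestration pattern (a minimal, illustrative setup; the full, working version is built step by step later on this page):

```python
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel

model = InferenceClientModel()

# A specialized agent for web browsing...
web_agent = CodeAgent(
    model=model,
    tools=[DuckDuckGoSearchTool()],
    name="web_agent",
    description="Browses the web to find information",
)

# ...managed by an orchestrator that delegates sub-tasks to it
orchestrator = CodeAgent(model=model, tools=[], managed_agents=[web_agent])
orchestrator.run("Find the latest Batman filming locations")
```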
## Solving a complex task with a multi-agent hierarchy
<Tip>
You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/multiagent_notebook.ipynb" target="_blank">this notebook</a>, which you can run using Google Colab.
</Tip>
The reception is approaching! With your help, Alfred is now nearly done with the preparations.
But now there's a problem: the Batmobile has disappeared. Alfred needs to find a replacement, and find it quickly.
Fortunately, a few biopics have been made about Bruce Wayne's life, so maybe Alfred could get a car left over from one of the movie sets and re-engineer it up to modern standards, which would certainly include a full self-driving option.
But this could be at any of the filming locations around the world, and there could be many of them.
So Alfred needs your help. Could you build an agent able to solve this task?
> 👉 Find all Batman filming locations in the world, calculate the time to transfer via boat to there, and represent them on a map, with a color varying by boat transfer time. Also represent some supercar factories with the same boat transfer time.
Let's build this!
This example needs some additional packages, so let's install them first:
```bash
pip install 'smolagents[litellm]' matplotlib geopandas shapely kaleido -q
```
### We first make a tool to get the cargo plane transfer time.
```python
import math
from typing import Optional, Tuple
from smolagents import tool
@tool
def calculate_cargo_travel_time(
origin_coords: Tuple[float, float],
destination_coords: Tuple[float, float],
cruising_speed_kmh: Optional[float] = 750.0, # average cargo plane speed
) -> float:
"""
Calculate the travel time for a cargo plane between two points on Earth using great-circle distance.
Args:
origin_coords: Tuple of (latitude, longitude) for the starting point
destination_coords: Tuple of (latitude, longitude) for the destination
cruising_speed_kmh: Optional cruising speed in km/h (defaults to 750 km/h for typical cargo planes)
Returns:
float: The estimated travel time in hours
Example:
>>> # Chicago (41.8781° N, 87.6298° W) to Sydney (33.8688° S, 151.2093° E)
>>> result = calculate_cargo_travel_time((41.8781, -87.6298), (-33.8688, 151.2093))
"""
def to_radians(degrees: float) -> float:
return degrees * (math.pi / 180)
# Extract the coordinates
lat1, lon1 = map(to_radians, origin_coords)
lat2, lon2 = map(to_radians, destination_coords)
# Earth's radius in kilometers
EARTH_RADIUS_KM = 6371.0
# Calculate the great-circle distance using the haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (
math.sin(dlat / 2) ** 2
+ math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
)
c = 2 * math.asin(math.sqrt(a))
distance = EARTH_RADIUS_KM * c
# Add 10% to account for non-direct routes and air traffic controls
actual_distance = distance * 1.1
# Calculate the flight time
# Add 1 hour for takeoff and landing procedures
flight_time = (actual_distance / cruising_speed_kmh) + 1.0
# Round the result
return round(flight_time, 2)
print(calculate_cargo_travel_time((41.8781, -87.6298), (-33.8688, 151.2093)))
```
### Setting up the agent
For the model provider, we use Together AI, one of the new [inference providers on the Hub](https://huggingface.co/blog/inference-providers)!
The GoogleSearchTool uses the [Serper API](https://serper.dev) to search the web, so this requires either setting the environment variable `SERPAPI_API_KEY` and passing `provider="serpapi"`, or having `SERPER_API_KEY` and passing `provider="serper"`.
If you don't have any Serp API provider set up, you can use `DuckDuckGoSearchTool` instead, but beware that it has a rate limit.
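For instance, a minimal fallback sketch might look like this (the query string is just an example):

```python
from smolagents import DuckDuckGoSearchTool

# No API key needed, but expect rate limits under heavy use
search_tool = DuckDuckGoSearchTool()
print(search_tool("Batman filming locations"))
```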
```python
import os
from PIL import Image
from smolagents import CodeAgent, GoogleSearchTool, InferenceClientModel, VisitWebpageTool
model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", provider="together")
```
We can start by creating a simple baseline agent to give us a basic report.
```python
task = """Find all Batman filming locations in the world, calculate the time to transfer via cargo plane to here (we're in Gotham, 40.7128° N, 74.0060° W), and return them to me as a pandas dataframe.
Also give me some supercar factories with the same cargo plane transfer time."""
```
```python
agent = CodeAgent(
model=model,
tools=[GoogleSearchTool("serper"), VisitWebpageTool(), calculate_cargo_travel_time],
additional_authorized_imports=["pandas"],
max_steps=20,
)
```
```python
result = agent.run(task)
```
```python
result
```
In our case, it generates this output:
```python
| | Location | Travel Time to Gotham (hours) |
|--|------------------------------------------------------|------------------------------|
| 0 | Necropolis Cemetery, Glasgow, Scotland, UK | 8.60 |
| 1 | St. George's Hall, Liverpool, England, UK | 8.81 |
| 2 | Two Temple Place, London, England, UK | 9.17 |
| 3 | Wollaton Hall, Nottingham, England, UK | 9.00 |
| 4 | Knebworth House, Knebworth, Hertfordshire, UK | 9.15 |
| 5 | Acton Lane Power Station, Acton Lane, Acton, UK | 9.16 |
| 6 | Queensboro Bridge, New York City, USA | 1.01 |
| 7 | Wall Street, New York City, USA | 1.00 |
| 8 | Mehrangarh Fort, Jodhpur, Rajasthan, India | 18.34 |
| 9 | Turda Gorge, Turda, Romania | 11.89 |
| 10 | Chicago, USA | 2.68 |
| 11 | Hong Kong, China | 19.99 |
| 12 | Cardington Studios, Northamptonshire, UK | 9.10 |
| 13 | Warner Bros. Leavesden Studios, Hertfordshire, UK | 9.13 |
| 14 | Westwood, Los Angeles, CA, USA | 6.79 |
| 15 | Woking, UK (McLaren) | 9.13 |
```
We could improve this further by adding some dedicated planning steps and more prompting.
Planning steps allow the agent to think ahead and plan its next actions, which can be useful for more complex tasks.
```python
agent.planning_interval = 4
detailed_report = agent.run(f"""
You're an expert analyst. You make comprehensive reports after visiting many websites.
Don't hesitate to search for many queries at once in a for loop.
For each data point that you find, visit the source url to confirm numbers.
{task}
""")
print(detailed_report)
```
```python
detailed_report
```
In our case, it generates this output:
```python
| | Location | Travel Time (hours) |
|--|--------------------------------------------------|---------------------|
| 0 | Bridge of Sighs, Glasgow Necropolis, Glasgow, UK | 8.6 |
| 1 | Wishart Street, Glasgow, Scotland, UK | 8.6 |
```
Thanks to these quick changes, we obtained a much more concise report simply by providing our agent with a detailed prompt and giving it planning capabilities!
The model's context window is filling up fast, though. So **if we ask our agent to combine the results of a detailed search with another one, it will become slower and will quickly ramp up token counts and costs**.
➡️ We need to improve the structure of our system.
### ✌️ Splitting the task between two agents
Multi-agent structures allow separating memories between different sub-tasks, with two great benefits:
- Each agent is more focused on its core task, and thus performs better
- Separating memories reduces the number of input tokens at each step, thus reducing latency and cost.
Let's create a team with a dedicated web search agent, managed by another agent.
The manager agent should have plotting capabilities to write its final report: so let's give it access to additional imports, including `matplotlib`, plus `geopandas` + `shapely` for spatial plotting.
```python
model = InferenceClientModel(
"Qwen/Qwen2.5-Coder-32B-Instruct", provider="together", max_tokens=8096
)
web_agent = CodeAgent(
model=model,
tools=[
GoogleSearchTool(provider="serper"),
VisitWebpageTool(),
calculate_cargo_travel_time,
],
name="web_agent",
description="Browses the web to find information",
verbosity_level=0,
max_steps=10,
)
```
The manager agent will need to do some heavy mental lifting.
So we give it the stronger model [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1), and add a `planning_interval` to the mix.
```python
from smolagents.utils import encode_image_base64, make_image_url
from smolagents import OpenAIServerModel
def check_reasoning_and_plot(final_answer, agent_memory):
final_answer
multimodal_model = OpenAIServerModel("gpt-4o", max_tokens=8096)
filepath = "saved_map.png"
assert os.path.exists(filepath), "Make sure to save the plot under saved_map.png!"
image = Image.open(filepath)
prompt = (
f"Here is a user-given task and the agent steps: {agent_memory.get_succinct_steps()}. Now here is the plot that was made."
"Please check that the reasoning process and plot are correct: do they correctly answer the given task?"
"First list reasons why yes/no, then write your final decision: PASS in caps lock if it is satisfactory, FAIL if it is not."
"Don't be harsh: if the plot mostly solves the task, it should pass."
"To pass, a plot should be made using px.scatter_map and not any other method (scatter_map looks nicer)."
)
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": prompt,
},
{
"type": "image_url",
"image_url": {"url": make_image_url(encode_image_base64(image))},
},
],
}
]
output = multimodal_model(messages).content
print("Feedback: ", output)
if "FAIL" in output:
raise Exception(output)
return True
manager_agent = CodeAgent(
model=InferenceClientModel("deepseek-ai/DeepSeek-R1", provider="together", max_tokens=8096),
tools=[calculate_cargo_travel_time],
managed_agents=[web_agent],
additional_authorized_imports=[
"geopandas",
"plotly",
"shapely",
"json",
"pandas",
"numpy",
],
planning_interval=5,
verbosity_level=2,
final_answer_checks=[check_reasoning_and_plot],
max_steps=15,
)
```
Let's inspect what this team looks like:
```python
manager_agent.visualize()
```
This generates something like the following output, which helps us understand the structure and relationships between the agents and the tools being used:
```python
CodeAgent | deepseek-ai/DeepSeek-R1
├── ✅ Authorized imports: ['geopandas', 'plotly', 'shapely', 'json', 'pandas', 'numpy']
├── 🛠️ Tools:
│ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
│ ┃ Name ┃ Description ┃ Arguments ┃
│ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ │ calculate_cargo_travel_time │ Calculate the travel time for a cargo │ origin_coords (`array`): Tuple of │
│ │ │ plane between two points on Earth │ (latitude, longitude) for the │
│ │ │ using great-circle distance. │ starting point │
│ │ │ │ destination_coords (`array`): Tuple │
│ │ │ │ of (latitude, longitude) for the │
│ │ │ │ destination │
│ │ │ │ cruising_speed_kmh (`number`): │
│ │ │ │ Optional cruising speed in km/h │
│ │ │ │ (defaults to 750 km/h for typical │
│ │ │ │ cargo planes) │
│ │ final_answer │ Provides a final answer to the given │ answer (`any`): The final answer to │
│ │ │ problem. │ the problem │
│ └─────────────────────────────┴───────────────────────────────────────┴───────────────────────────────────────┘
└── 🤖 Managed agents:
└── web_agent | CodeAgent | Qwen/Qwen2.5-Coder-32B-Instruct
├── ✅ Authorized imports: []
├── 📝 Description: Browses the web to find information
└── 🛠️ Tools:
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Name ┃ Description ┃ Arguments ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ web_search │ Performs a google web search for │ query (`string`): The search │
│ │ your query then returns a string │ query to perform. │
│ │ of the top search results. │ filter_year (`integer`): │
│ │ │ Optionally restrict results to a │
│ │ │ certain year │
│ visit_webpage │ Visits a webpage at the given url │ url (`string`): The url of the │
│ │ and reads its content as a │ webpage to visit. │
│ │ markdown string. Use this to │ │
│ │ browse webpages. │ │
│ calculate_cargo_travel_time │ Calculate the travel time for a │ origin_coords (`array`): Tuple of │
│ │ cargo plane between two points on │ (latitude, longitude) for the │
│ │ Earth using great-circle │ starting point │
│ │ distance. │ destination_coords (`array`): │
│ │ │ Tuple of (latitude, longitude) │
│ │ │ for the destination │
│ │ │ cruising_speed_kmh (`number`): │
│ │ │ Optional cruising speed in km/h │
│ │ │ (defaults to 750 km/h for typical │
│ │ │ cargo planes) │
│ final_answer │ Provides a final answer to the │ answer (`any`): The final answer │
│ │ given problem. │ to the problem │
└─────────────────────────────┴───────────────────────────────────┴───────────────────────────────────┘
```
```python
manager_agent.run("""
Find all Batman filming locations in the world, calculate the time to transfer via cargo plane to here (we're in Gotham, 40.7128° N, 74.0060° W).
Also give me some supercar factories with the same cargo plane transfer time. You need at least 6 points in total.
Represent this as spatial map of the world, with the locations represented as scatter points with a color that depends on the travel time, and save it to saved_map.png!
Here's an example of how to plot and return a map:
import plotly.express as px
df = px.data.carshare()
fig = px.scatter_map(df, lat="centroid_lat", lon="centroid_lon", text="name", color="peak_hour", size=100,
color_continuous_scale=px.colors.sequential.Magma, size_max=15, zoom=1)
fig.show()
fig.write_image("saved_image.png")
final_answer(fig)
Never try to process strings using code: when you have a string to read, just print it and you'll see it.
""")
```
I don't know how it went in your run, but in mine, the manager agent skilfully divided the tasks given to the web agent into `1. Search for Batman filming locations`, then `2. Find supercar factories`, before aggregating the lists and plotting the map.
Let's see what the map looks like by inspecting it directly from the agent state:
```python
manager_agent.python_executor.state["fig"]
```
This will output the map:
![Multiagent system example output map](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/output_map.png)
## Resources
- [Multi-Agent Systems](https://huggingface.co/docs/smolagents/main/en/examples/multiagents) – Overview of multi-agent systems.
- [What is Agentic RAG?](https://weaviate.io/blog/what-is-agentic-rag) – Introduction to Agentic RAG.
- [Multi-Agent RAG System 🤖🤝🤖 Recipe](https://huggingface.co/learn/cookbook/multiagent_rag_system) – Step-by-step guide to building a multi-agent RAG system.
| agents-course/units/zh-CN/unit2/smolagents/multi_agent_systems.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit2/smolagents/multi_agent_systems.mdx",
"repo_id": "agents-course",
"token_count": 11626
} | 23 |
# Conclusion
**Congratulations on finishing the Agents course!**
Through perseverance and dedication, you have built a solid foundation in AI Agents.
But finishing this course is **not the end of your journey**. It's just the beginning: don't hesitate to explore the next section, where we share more curated resources to help you keep learning, including advanced material such as **MCPs**.
**Thank you** for taking this course. **We hope you liked this course as much as we enjoyed writing it.**
And don't forget: **Keep Learning, Stay Awesome 🤗** | agents-course/units/zh-CN/unit4/conclusion.mdx/0 | {
"file_path": "agents-course/units/zh-CN/unit4/conclusion.mdx",
"repo_id": "agents-course",
"token_count": 406
} | 24 |
.PHONY: clean-ptx clean test
clean-ptx:
find target -name "*.ptx" -type f -delete
echo "" > candle-kernels/src/lib.rs
touch candle-kernels/build.rs
touch candle-examples/build.rs
touch candle-flash-attn/build.rs
clean:
cargo clean
test:
cargo test
all: test
| candle/Makefile/0 | {
"file_path": "candle/Makefile",
"repo_id": "candle",
"token_count": 107
} | 25 |
# Writing a custom kernel
| candle/candle-book/src/cuda/writing.md/0 | {
"file_path": "candle/candle-book/src/cuda/writing.md",
"repo_id": "candle",
"token_count": 6
} | 26 |
# Tracing
Tracing is a powerful tool for identifying performance issues and bottlenecks in code.
> Profiling on GPUs is trickier due to asynchronous execution; see the [GPU section](#gpu).
## Overview
Candle uses the [tracing](https://docs.rs/tracing/latest/tracing/) crate for instrumentation.
To try it out, run an example in `candle-examples` with the `--tracing` flag.
This generates a trace file, typically named `trace-<timestamp>.json`.
You can view the trace in Chrome by navigating to `chrome://tracing/`, clicking **Load**, and selecting the generated trace file.
## Adding Tracing
Candle includes built-in tracing for many internal operations, using [spans](https://docs.rs/tracing/latest/tracing/struct.Span.html) to mark key points of execution.
To add custom tracing in your code, you can define a span like this:
```rust
let span = tracing::span!(tracing::Level::TRACE, name);
```
Then, to record the span during execution, create a guard:
```rust
let _enter = span.enter();
```
This guard will record the span's duration, from when it is created to when it is dropped, into a global data structure managed by the tracing crate.
## Recording and Saving a Trace
To capture and save trace data, you need to configure the tracing system with an output format. Candle uses the [tracing_subscriber](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/) and [tracing_chrome](https://docs.rs/tracing-chrome/latest/tracing_chrome/) crates.
The snippet below sets up a Chrome compatible recorder that logs all tracing activity between creation and drop of the guard:
```rust
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let _guard = {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
guard
};
```
## GPU
When using CUDA, Metal, or other asynchronous GPU backends, tracing may produce misleading timing data because operations are queued rather than executed immediately.
### CUDA
For CUDA-specific profiling, you have two options:
1. Set the environment variable `CUDA_LAUNCH_BLOCKING=1` which forces synchronous execution. This makes trace timings more accurate, at the cost of reduced performance.
2. Use [NVIDIA's Nsight Systems](https://developer.nvidia.com/nsight-systems) (`nsys profile` and `nsys-ui`) which are designed specifically for profiling asynchronous CUDA executions.
We recommend using NVIDIA's Nsight Systems when possible, as it offers accurate performance data without altering typical execution patterns. In contrast, setting the `CUDA_LAUNCH_BLOCKING` environment variable forces synchronous execution, which can significantly alter execution behavior.
#### Performance Profiling with NVIDIA Nsight Systems
1. Generate an `.nsys-rep` file containing performance data ([docs](https://docs.nvidia.com/nsight-systems/UserGuide/index.html#example-single-command-lines))
- Run `nsys profile --trace cuda,nvtx,osrt --gpu-metrics-device=all --output profile_run ./target/debug/... --prompt "whatever "`
2. Open the generated `.nsys-rep` report file in Nsight Systems GUI
- File > Open | candle/candle-book/src/tracing.md/0 | {
"file_path": "candle/candle-book/src/tracing.md",
"repo_id": "candle",
"token_count": 862
} | 27 |
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, Criterion, Throughput};
use std::time::Instant;
fn rand_uniform(a: &Tensor) {
a.rand_like(-1.0, 123.0).unwrap();
}
fn rand_normal(a: &Tensor) {
a.randn_like(100.0, 15.0).unwrap();
}
fn run_random_bench(c: &mut Criterion, device: &Device) {
let b = 1;
let rows = 2048;
let cols = 2048;
let dtype = DType::F32;
let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();
let flops = b * rows * cols * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name("random_uniform"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |benches| {
benches.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
rand_uniform(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();
let mut group = c.benchmark_group(device.bench_name("random_normal"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |benches| {
benches.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
rand_normal(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_random_bench(c, &device);
}
}
criterion_group!(benches, criterion_benchmark);
| candle/candle-core/benches/benchmarks/random.rs/0 | {
"file_path": "candle/candle-core/benches/benchmarks/random.rs",
"repo_id": "candle",
"token_count": 812
} | 28 |
//! Traits and methods for CPU-backed Tensors
pub mod erf;
pub mod kernels;
#[allow(unused)]
trait Cpu<const ARR: usize> {
type Unit;
type Array;
const STEP: usize;
const EPR: usize;
fn n() -> usize;
unsafe fn zero() -> Self::Unit;
unsafe fn zero_array() -> Self::Array;
unsafe fn load(mem_addr: *const f32) -> Self::Unit;
unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit;
unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit;
unsafe fn vec_reduce(x: Self::Array, y: *mut f32);
unsafe fn from_f32(v: f32) -> Self::Unit;
unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit);
}
#[allow(unused)]
trait CpuF16<const ARR: usize> {
type Unit;
type Array;
const STEP: usize;
const EPR: usize;
fn n() -> usize;
unsafe fn zero() -> Self::Unit;
unsafe fn zero_array() -> Self::Array;
unsafe fn load(mem_addr: *const f16) -> Self::Unit;
unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit;
unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit;
unsafe fn vec_reduce(x: Self::Array, y: *mut f32);
unsafe fn from_f32(v: f32) -> Self::Unit;
unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit);
}
#[allow(unused)]
trait CpuBF16<const ARR: usize> {
type Unit;
type Array;
const STEP: usize;
const EPR: usize;
fn n() -> usize;
unsafe fn zero() -> Self::Unit;
unsafe fn zero_array() -> Self::Array;
unsafe fn load(mem_addr: *const bf16) -> Self::Unit;
unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit;
unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit;
unsafe fn vec_reduce(x: Self::Array, y: *mut f32);
unsafe fn from_f32(v: f32) -> Self::Unit;
unsafe fn vec_store(mem_addr: *mut bf16, a: Self::Unit);
}
use half::{bf16, f16};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(target_feature = "avx2")]
pub mod avx;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(target_feature = "avx2")]
pub use avx::{CurrentCpu, CurrentCpuBF16, CurrentCpuF16};
#[cfg(target_arch = "wasm32")]
#[cfg(target_feature = "simd128")]
pub mod simd128;
#[cfg(target_arch = "wasm32")]
#[cfg(target_feature = "simd128")]
pub use simd128::CurrentCpu;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
#[cfg(target_feature = "neon")]
pub mod neon;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
#[cfg(target_feature = "neon")]
pub use neon::CurrentCpu;
#[cfg(any(
target_feature = "neon",
target_feature = "avx2",
target_feature = "simd128"
))]
#[inline(always)]
pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) {
let np = k & !(CurrentCpu::STEP - 1);
let mut sum = CurrentCpu::zero_array();
let mut ax = CurrentCpu::zero_array();
let mut ay = CurrentCpu::zero_array();
for i in (0..np).step_by(CurrentCpu::STEP) {
for j in 0..CurrentCpu::n() {
ax[j] = CurrentCpu::load(a_row.add(i + j * CurrentCpu::EPR));
ay[j] = CurrentCpu::load(b_row.add(i + j * CurrentCpu::EPR));
sum[j] = CurrentCpu::vec_fma(sum[j], ax[j], ay[j]);
}
}
CurrentCpu::vec_reduce(sum, c);
// leftovers
for i in np..k {
*c += *a_row.add(i) * (*b_row.add(i));
}
}
#[cfg(not(any(
target_feature = "neon",
target_feature = "avx2",
target_feature = "simd128"
)))]
#[inline(always)]
pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) {
// leftovers
for i in 0..k {
*c += *a_row.add(i) * (*b_row.add(i));
}
}
#[cfg(any(
target_feature = "neon",
target_feature = "avx2",
target_feature = "simd128"
))]
#[inline(always)]
pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) {
let np = k & !(CurrentCpu::STEP - 1);
let mut sum = CurrentCpu::zero_array();
let mut x = CurrentCpu::zero_array();
for i in (0..np).step_by(CurrentCpu::STEP) {
for j in 0..CurrentCpu::n() {
x[j] = CurrentCpu::load(row.add(i + j * CurrentCpu::EPR));
sum[j] = CurrentCpu::vec_add(sum[j], x[j]);
}
}
CurrentCpu::vec_reduce(sum, b);
// leftovers
for i in np..k {
*b += *row.add(i)
}
}
#[cfg(not(any(
target_feature = "neon",
target_feature = "avx2",
target_feature = "simd128"
)))]
#[inline(always)]
pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) {
*b = 0f32;
for i in 0..k {
*b += *row.add(i)
}
}
#[cfg(target_feature = "avx2")]
#[inline(always)]
pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) {
let mut sumf = 0.0f32;
let np = k & !(CurrentCpuF16::STEP - 1);
let mut sum = CurrentCpuF16::zero_array();
let mut ax = CurrentCpuF16::zero_array();
let mut ay = CurrentCpuF16::zero_array();
for i in (0..np).step_by(CurrentCpuF16::STEP) {
for j in 0..CurrentCpuF16::n() {
ax[j] = CurrentCpuF16::load(a_row.add(i + j * CurrentCpuF16::EPR));
ay[j] = CurrentCpuF16::load(b_row.add(i + j * CurrentCpuF16::EPR));
sum[j] = CurrentCpuF16::vec_fma(sum[j], ax[j], ay[j]);
}
}
CurrentCpuF16::vec_reduce(sum, &mut sumf);
// leftovers
for i in np..k {
sumf += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32();
}
*c = sumf;
}
#[cfg(target_feature = "avx2")]
#[inline(always)]
pub(crate) unsafe fn vec_dot_bf16(a_row: *const bf16, b_row: *const bf16, c: *mut f32, k: usize) {
let mut sumf = 0.0f32;
let np = k & !(CurrentCpuBF16::STEP - 1);
let mut sum = CurrentCpuBF16::zero_array();
let mut ax = CurrentCpuBF16::zero_array();
let mut ay = CurrentCpuBF16::zero_array();
for i in (0..np).step_by(CurrentCpuBF16::STEP) {
for j in 0..CurrentCpuBF16::n() {
ax[j] = CurrentCpuBF16::load(a_row.add(i + j * CurrentCpuBF16::EPR));
ay[j] = CurrentCpuBF16::load(b_row.add(i + j * CurrentCpuBF16::EPR));
sum[j] = CurrentCpuBF16::vec_fma(sum[j], ax[j], ay[j]);
}
}
CurrentCpuBF16::vec_reduce(sum, &mut sumf);
// leftovers
for i in np..k {
sumf += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32();
}
*c = sumf;
}
#[cfg(not(target_feature = "avx2"))]
#[inline(always)]
pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) {
// leftovers
let mut sum = 0.0;
for i in 0..k {
sum += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32();
}
*c = sum;
}
#[cfg(not(target_feature = "avx2"))]
#[inline(always)]
pub(crate) unsafe fn vec_dot_bf16(a_row: *const bf16, b_row: *const bf16, c: *mut f32, k: usize) {
// leftovers
let mut sum = 0.0;
for i in 0..k {
sum += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32();
}
*c = sum;
}
| candle/candle-core/src/cpu/mod.rs/0 | {
"file_path": "candle/candle-core/src/cpu/mod.rs",
"repo_id": "candle",
"token_count": 3326
} | 29 |
//! Candle-specific Error and Result
use crate::{DType, DeviceLocation, Layout, MetalError, Shape};
#[derive(Debug, Clone)]
pub struct MatMulUnexpectedStriding {
pub lhs_l: Layout,
pub rhs_l: Layout,
pub bmnk: (usize, usize, usize, usize),
pub msg: &'static str,
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{self}")
}
}
/// Main library error type.
#[derive(thiserror::Error)]
pub enum Error {
// === DType Errors ===
#[error("{msg}, expected: {expected:?}, got: {got:?}")]
UnexpectedDType {
msg: &'static str,
expected: DType,
got: DType,
},
#[error("dtype mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")]
DTypeMismatchBinaryOp {
lhs: DType,
rhs: DType,
op: &'static str,
},
#[error("unsupported dtype {0:?} for op {1}")]
UnsupportedDTypeForOp(DType, &'static str),
// === Dimension Index Errors ===
#[error("{op}: dimension index {dim} out of range for shape {shape:?}")]
DimOutOfRange {
shape: Shape,
dim: i32,
op: &'static str,
},
#[error("{op}: duplicate dim index {dims:?} for shape {shape:?}")]
DuplicateDimIndex {
shape: Shape,
dims: Vec<usize>,
op: &'static str,
},
// === Shape Errors ===
#[error("unexpected rank, expected: {expected}, got: {got} ({shape:?})")]
UnexpectedNumberOfDims {
expected: usize,
got: usize,
shape: Shape,
},
#[error("{msg}, expected: {expected:?}, got: {got:?}")]
UnexpectedShape {
msg: String,
expected: Shape,
got: Shape,
},
#[error(
"Shape mismatch, got buffer of size {buffer_size} which is compatible with shape {shape:?}"
)]
ShapeMismatch { buffer_size: usize, shape: Shape },
#[error("shape mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")]
ShapeMismatchBinaryOp {
lhs: Shape,
rhs: Shape,
op: &'static str,
},
#[error("shape mismatch in cat for dim {dim}, shape for arg 1: {first_shape:?} shape for arg {n}: {nth_shape:?}")]
ShapeMismatchCat {
dim: usize,
first_shape: Shape,
n: usize,
nth_shape: Shape,
},
#[error("Cannot divide tensor of shape {shape:?} equally along dim {dim} into {n_parts}")]
ShapeMismatchSplit {
shape: Shape,
dim: usize,
n_parts: usize,
},
#[error("{op} can only be performed on a single dimension")]
OnlySingleDimension { op: &'static str, dims: Vec<usize> },
#[error("empty tensor for {op}")]
EmptyTensor { op: &'static str },
// === Device Errors ===
#[error("device mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")]
DeviceMismatchBinaryOp {
lhs: DeviceLocation,
rhs: DeviceLocation,
op: &'static str,
},
// === Op Specific Errors ===
#[error("narrow invalid args {msg}: {shape:?}, dim: {dim}, start: {start}, len:{len}")]
NarrowInvalidArgs {
shape: Shape,
dim: usize,
start: usize,
len: usize,
msg: &'static str,
},
#[error("conv1d invalid args {msg}: inp: {inp_shape:?}, k: {k_shape:?}, pad: {padding}, stride: {stride}")]
Conv1dInvalidArgs {
inp_shape: Shape,
k_shape: Shape,
padding: usize,
stride: usize,
msg: &'static str,
},
#[error("{op} invalid index {index} with dim size {size}")]
InvalidIndex {
op: &'static str,
index: usize,
size: usize,
},
#[error("cannot broadcast {src_shape:?} to {dst_shape:?}")]
BroadcastIncompatibleShapes { src_shape: Shape, dst_shape: Shape },
#[error("cannot set variable {msg}")]
CannotSetVar { msg: &'static str },
// Box indirection to avoid large variant.
#[error("{0:?}")]
MatMulUnexpectedStriding(Box<MatMulUnexpectedStriding>),
#[error("{op} only supports contiguous tensors")]
RequiresContiguous { op: &'static str },
#[error("{op} expects at least one tensor")]
OpRequiresAtLeastOneTensor { op: &'static str },
#[error("{op} expects at least two tensors")]
OpRequiresAtLeastTwoTensors { op: &'static str },
#[error("backward is not supported for {op}")]
BackwardNotSupported { op: &'static str },
// === Other Errors ===
#[error("the candle crate has not been built with cuda support")]
NotCompiledWithCudaSupport,
#[error("the candle crate has not been built with metal support")]
NotCompiledWithMetalSupport,
#[error("cannot find tensor {path}")]
CannotFindTensor { path: String },
// === Wrapped Errors ===
#[error(transparent)]
Cuda(Box<dyn std::error::Error + Send + Sync>),
#[error("Metal error {0}")]
Metal(#[from] MetalError),
#[cfg(not(target_arch = "wasm32"))]
#[error(transparent)]
Ug(#[from] ug::Error),
#[error(transparent)]
TryFromIntError(#[from] core::num::TryFromIntError),
#[error("npy/npz error {0}")]
Npy(String),
/// Zip file format error.
#[error(transparent)]
Zip(#[from] zip::result::ZipError),
/// Integer parse error.
#[error(transparent)]
ParseInt(#[from] std::num::ParseIntError),
/// Utf8 parse error.
#[error(transparent)]
FromUtf8(#[from] std::string::FromUtf8Error),
/// I/O error.
#[error(transparent)]
Io(#[from] std::io::Error),
/// SafeTensor error.
#[error(transparent)]
SafeTensor(#[from] safetensors::SafeTensorError),
#[error("unsupported safetensor dtype {0:?}")]
UnsupportedSafeTensorDtype(safetensors::Dtype),
/// Arbitrary errors wrapping.
#[error("{0}")]
Wrapped(Box<dyn std::fmt::Display + Send + Sync>),
#[error("{context}\n{inner}")]
Context {
inner: Box<Self>,
context: Box<dyn std::fmt::Display + Send + Sync>,
},
/// Adding path information to an error.
#[error("path: {path:?} {inner}")]
WithPath {
inner: Box<Self>,
path: std::path::PathBuf,
},
#[error("{inner}\n{backtrace}")]
WithBacktrace {
inner: Box<Self>,
backtrace: Box<std::backtrace::Backtrace>,
},
/// User generated error message, typically created via `bail!`.
#[error("{0}")]
Msg(String),
#[error("unwrap none")]
UnwrapNone,
}
pub type Result<T> = std::result::Result<T, Error>;
impl Error {
pub fn wrap(err: impl std::fmt::Display + Send + Sync + 'static) -> Self {
Self::Wrapped(Box::new(err)).bt()
}
pub fn msg(err: impl std::fmt::Display) -> Self {
Self::Msg(err.to_string()).bt()
}
pub fn debug(err: impl std::fmt::Debug) -> Self {
Self::Msg(format!("{err:?}")).bt()
}
pub fn bt(self) -> Self {
let backtrace = std::backtrace::Backtrace::capture();
match backtrace.status() {
std::backtrace::BacktraceStatus::Disabled
| std::backtrace::BacktraceStatus::Unsupported => self,
_ => Self::WithBacktrace {
inner: Box::new(self),
backtrace: Box::new(backtrace),
},
}
}
pub fn with_path<P: AsRef<std::path::Path>>(self, p: P) -> Self {
Self::WithPath {
inner: Box::new(self),
path: p.as_ref().to_path_buf(),
}
}
pub fn context(self, c: impl std::fmt::Display + Send + Sync + 'static) -> Self {
Self::Context {
inner: Box::new(self),
context: Box::new(c),
}
}
}
#[macro_export]
macro_rules! bail {
($msg:literal $(,)?) => {
return Err($crate::Error::Msg(format!($msg).into()).bt())
};
($err:expr $(,)?) => {
return Err($crate::Error::Msg(format!($err).into()).bt())
};
($fmt:expr, $($arg:tt)*) => {
return Err($crate::Error::Msg(format!($fmt, $($arg)*).into()).bt())
};
}
pub fn zip<T, U>(r1: Result<T>, r2: Result<U>) -> Result<(T, U)> {
match (r1, r2) {
(Ok(r1), Ok(r2)) => Ok((r1, r2)),
(Err(e), _) => Err(e),
(_, Err(e)) => Err(e),
}
}
// Taken from anyhow.
pub trait Context<T> {
/// Wrap the error value with additional context.
fn context<C>(self, context: C) -> Result<T>
where
C: std::fmt::Display + Send + Sync + 'static;
/// Wrap the error value with additional context that is evaluated lazily
/// only once an error does occur.
fn with_context<C, F>(self, f: F) -> Result<T>
where
C: std::fmt::Display + Send + Sync + 'static,
F: FnOnce() -> C;
}
impl<T> Context<T> for Option<T> {
fn context<C>(self, context: C) -> Result<T>
where
C: std::fmt::Display + Send + Sync + 'static,
{
match self {
Some(v) => Ok(v),
None => Err(Error::UnwrapNone.context(context).bt()),
}
}
fn with_context<C, F>(self, f: F) -> Result<T>
where
C: std::fmt::Display + Send + Sync + 'static,
F: FnOnce() -> C,
{
match self {
Some(v) => Ok(v),
None => Err(Error::UnwrapNone.context(f()).bt()),
}
}
}
| candle/candle-core/src/error.rs/0 | {
"file_path": "candle/candle-core/src/error.rs",
"repo_id": "candle",
"token_count": 4127
} | 30 |
use super::utils::{
get_scale_min_k4, group_for_dequantization, group_for_quantization, make_q3_quants,
make_qkx1_quants, make_qx_quants, nearest_int,
};
use super::GgmlDType;
use crate::Result;
use byteorder::{ByteOrder, LittleEndian};
use half::{bf16, f16};
use rayon::prelude::*;
// Default to QK_K 256 rather than 64.
pub const QK_K: usize = 256;
pub const K_SCALE_SIZE: usize = 12;
pub const QK4_0: usize = 32;
pub const QK4_1: usize = 32;
pub const QK5_0: usize = 32;
pub const QK5_1: usize = 32;
pub const QK8_0: usize = 32;
pub const QK8_1: usize = 32;
pub trait GgmlType: Sized + Clone + Send + Sync {
const DTYPE: GgmlDType;
const BLCK_SIZE: usize;
type VecDotType: GgmlType;
// This is only safe for types that include immediate values such as float/int/...
fn zeros() -> Self {
unsafe { std::mem::MaybeUninit::zeroed().assume_init() }
}
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()>;
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()>;
/// Dot product used as a building block for quantized mat-mul.
/// n is the number of elements to be considered.
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>;
/// Generic implementation of the dot product without simd optimizations.
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>;
}
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ4_0 {
pub(crate) d: f16,
pub(crate) qs: [u8; QK4_0 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ4_0>() == 18);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ4_1 {
pub(crate) d: f16,
pub(crate) m: f16,
pub(crate) qs: [u8; QK4_1 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ4_1>() == 20);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5_0 {
pub(crate) d: f16,
pub(crate) qh: [u8; 4],
pub(crate) qs: [u8; QK5_0 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ5_0>() == 22);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5_1 {
pub(crate) d: f16,
pub(crate) m: f16,
pub(crate) qh: [u8; 4],
pub(crate) qs: [u8; QK5_1 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ5_1>() == 24);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8_0 {
pub(crate) d: f16,
pub(crate) qs: [i8; QK8_0],
}
const _: () = assert!(std::mem::size_of::<BlockQ8_0>() == 34);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8_1 {
pub(crate) d: f16,
pub(crate) s: f16,
pub(crate) qs: [i8; QK8_1],
}
const _: () = assert!(std::mem::size_of::<BlockQ8_1>() == 36);
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ2K {
pub(crate) scales: [u8; QK_K / 16],
pub(crate) qs: [u8; QK_K / 4],
pub(crate) d: f16,
pub(crate) dmin: f16,
}
const _: () = assert!(QK_K / 16 + QK_K / 4 + 2 * 2 == std::mem::size_of::<BlockQ2K>());
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ3K {
pub(crate) hmask: [u8; QK_K / 8],
pub(crate) qs: [u8; QK_K / 4],
pub(crate) scales: [u8; 12],
pub(crate) d: f16,
}
const _: () = assert!(QK_K / 8 + QK_K / 4 + 12 + 2 == std::mem::size_of::<BlockQ3K>());
#[derive(Debug, Clone, PartialEq)]
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/k_quants.h#L82
#[repr(C)]
pub struct BlockQ4K {
pub(crate) d: f16,
pub(crate) dmin: f16,
pub(crate) scales: [u8; K_SCALE_SIZE],
pub(crate) qs: [u8; QK_K / 2],
}
const _: () = assert!(QK_K / 2 + K_SCALE_SIZE + 2 * 2 == std::mem::size_of::<BlockQ4K>());
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5K {
pub(crate) d: f16,
pub(crate) dmin: f16,
pub(crate) scales: [u8; K_SCALE_SIZE],
pub(crate) qh: [u8; QK_K / 8],
pub(crate) qs: [u8; QK_K / 2],
}
const _: () =
assert!(QK_K / 8 + QK_K / 2 + 2 * 2 + K_SCALE_SIZE == std::mem::size_of::<BlockQ5K>());
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ6K {
pub(crate) ql: [u8; QK_K / 2],
pub(crate) qh: [u8; QK_K / 4],
pub(crate) scales: [i8; QK_K / 16],
pub(crate) d: f16,
}
const _: () = assert!(3 * QK_K / 4 + QK_K / 16 + 2 == std::mem::size_of::<BlockQ6K>());
#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8K {
pub(crate) d: f32,
pub(crate) qs: [i8; QK_K],
pub(crate) bsums: [i16; QK_K / 16],
}
const _: () = assert!(4 + QK_K + QK_K / 16 * 2 == std::mem::size_of::<BlockQ8K>());
impl GgmlType for BlockQ4_0 {
const DTYPE: GgmlDType = GgmlDType::Q4_0;
const BLCK_SIZE: usize = QK4_0;
type VecDotType = BlockQ8_0;
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1525
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
let qk = Self::BLCK_SIZE;
if k % qk != 0 {
crate::bail!("dequantize_row_q4_0: {k} is not divisible by {qk}")
}
let nb = k / qk;
for i in 0..nb {
let d = xs[i].d.to_f32();
for j in 0..(qk / 2) {
let x0 = (xs[i].qs[j] & 0x0F) as i16 - 8;
let x1 = (xs[i].qs[j] >> 4) as i16 - 8;
ys[i * qk + j] = (x0 as f32) * d;
ys[i * qk + j + qk / 2] = (x1 as f32) * d;
}
}
Ok(())
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q4_0
let qk = Self::BLCK_SIZE;
let k = xs.len();
if k % qk != 0 {
crate::bail!("{k} is not divisible by {}", qk);
};
let nb = k / qk;
if ys.len() != nb {
crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,)
}
for (i, ys) in ys.iter_mut().enumerate() {
let mut amax = 0f32;
let mut max = 0f32;
let xs = &xs[i * qk..(i + 1) * qk];
for &x in xs.iter() {
if amax < x.abs() {
amax = x.abs();
max = x;
}
}
let d = max / -8.0;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
for (j, q) in ys.qs.iter_mut().enumerate() {
let x0 = xs[j] * id;
let x1 = xs[qk / 2 + j] * id;
let xi0 = u8::min(15, (x0 + 8.5) as u8);
let xi1 = u8::min(15, (x1 + 8.5) as u8);
*q = xi0 | (xi1 << 4)
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L2361C10-L2361C122
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q4_0_q8_0(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q4_0_q8_0(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q4_0_q8_0(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
let qk = QK8_0;
if n % QK8_0 != 0 {
crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}")
}
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let mut sum_i = 0;
for j in 0..qk / 2 {
let v0 = (xs.qs[j] & 0x0F) as i32 - 8;
let v1 = (xs.qs[j] >> 4) as i32 - 8;
sum_i += v0 * ys.qs[j] as i32 + v1 * ys.qs[j + qk / 2] as i32
}
sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d)
}
Ok(sumf)
}
}
impl GgmlType for BlockQ4_1 {
const DTYPE: GgmlDType = GgmlDType::Q4_1;
const BLCK_SIZE: usize = QK4_1;
type VecDotType = BlockQ8_1;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
// ggml_vec_dot_q4_1_q8_1
let qk = QK8_1;
if n % qk != 0 {
crate::bail!("vec_dot_q4_1_q8_1: {n} is not divisible by {qk}")
}
let nb = n / qk;
if nb % 2 != 0 {
crate::bail!("vec_dot_q4_1_q8_1: {n}, nb is not divisible by 2")
}
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let mut sumi = 0i32;
for j in 0..qk / 2 {
let v0 = xs.qs[j] as i32 & 0x0F;
let v1 = xs.qs[j] as i32 >> 4;
sumi += (v0 * ys.qs[j] as i32) + (v1 * ys.qs[j + qk / 2] as i32);
}
sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d)
+ f16::to_f32(xs.m) * f16::to_f32(ys.s)
}
Ok(sumf)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q4_1
let qk = Self::BLCK_SIZE;
if ys.len() * qk != xs.len() {
crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,)
}
for (i, ys) in ys.iter_mut().enumerate() {
let xs = &xs[i * qk..(i + 1) * qk];
let mut min = f32::INFINITY;
let mut max = f32::NEG_INFINITY;
for &x in xs.iter() {
min = f32::min(x, min);
max = f32::max(x, max);
}
let d = (max - min) / ((1 << 4) - 1) as f32;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
ys.m = f16::from_f32(min);
for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() {
let x0 = (xs[j] - min) * id;
let x1 = (xs[qk / 2 + j] - min) * id;
let xi0 = u8::min(15, (x0 + 0.5) as u8);
let xi1 = u8::min(15, (x1 + 0.5) as u8);
*q = xi0 | (xi1 << 4);
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1545
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK4_1 != 0 {
crate::bail!("dequantize_row_q4_1: {k} is not divisible by {QK4_1}");
}
let nb = k / QK4_1;
for i in 0..nb {
let d = xs[i].d.to_f32();
let m = xs[i].m.to_f32();
for j in 0..(QK4_1 / 2) {
let x0 = xs[i].qs[j] & 0x0F;
let x1 = xs[i].qs[j] >> 4;
ys[i * QK4_1 + j] = (x0 as f32) * d + m;
ys[i * QK4_1 + j + QK4_1 / 2] = (x1 as f32) * d + m;
}
}
Ok(())
}
}
impl GgmlType for BlockQ5_0 {
const DTYPE: GgmlDType = GgmlDType::Q5_0;
const BLCK_SIZE: usize = QK5_0;
type VecDotType = BlockQ8_0;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
let qk = Self::BLCK_SIZE;
if n % Self::BLCK_SIZE != 0 {
crate::bail!("vec_dot_q5_0_q8_0: {n} is not divisible by {qk}")
}
let nb = n / qk;
if nb % 2 != 0 {
crate::bail!("vec_dot_q5_0_q8_0: {n}, nb is not divisible by 2")
}
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(_n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let qh = LittleEndian::read_u32(&xs.qh);
let mut sumi = 0i32;
for j in 0..Self::BLCK_SIZE / 2 {
let xh_0 = (((qh & (1u32 << j)) >> j) << 4) as u8;
let xh_1 = ((qh & (1u32 << (j + 16))) >> (j + 12)) as u8;
let x0 = ((xs.qs[j] & 0x0F) as i32 | xh_0 as i32) - 16;
let x1 = ((xs.qs[j] >> 4) as i32 | xh_1 as i32) - 16;
sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32);
}
sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d)
}
Ok(sumf)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q5_0
let k = xs.len();
if ys.len() * Self::BLCK_SIZE != k {
crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE)
}
for (i, ys) in ys.iter_mut().enumerate() {
let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE];
let mut amax = 0f32;
let mut max = 0f32;
for &x in xs.iter() {
if amax < x.abs() {
amax = x.abs();
max = x;
}
}
let d = max / -16.;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
let mut qh = 0u32;
for j in 0..Self::BLCK_SIZE / 2 {
let x0 = xs[j] * id;
let x1 = xs[j + Self::BLCK_SIZE / 2] * id;
let xi0 = ((x0 + 16.5) as i8).min(31) as u8;
let xi1 = ((x1 + 16.5) as i8).min(31) as u8;
ys.qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
qh |= ((xi0 as u32 & 0x10) >> 4) << j;
qh |= ((xi1 as u32 & 0x10) >> 4) << (j + Self::BLCK_SIZE / 2);
}
LittleEndian::write_u32(&mut ys.qh, qh)
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1566
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK5_0 != 0 {
crate::bail!("dequantize_row_q5_0: {k} is not divisible by {QK5_0}");
}
let nb = k / QK5_0;
for i in 0..nb {
let d = xs[i].d.to_f32();
let qh: u32 = LittleEndian::read_u32(&xs[i].qh);
for j in 0..(QK5_0 / 2) {
let xh_0 = (((qh >> j) << 4) & 0x10) as u8;
let xh_1 = ((qh >> (j + 12)) & 0x10) as u8;
let x0 = ((xs[i].qs[j] & 0x0F) | xh_0) as i32 - 16;
let x1 = ((xs[i].qs[j] >> 4) | xh_1) as i32 - 16;
ys[i * QK5_0 + j] = (x0 as f32) * d;
ys[i * QK5_0 + j + QK5_0 / 2] = (x1 as f32) * d;
}
}
Ok(())
}
}
impl GgmlType for BlockQ5_1 {
const DTYPE: GgmlDType = GgmlDType::Q5_1;
const BLCK_SIZE: usize = QK5_1;
type VecDotType = BlockQ8_1;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
let qk = Self::BLCK_SIZE;
if n % Self::BLCK_SIZE != 0 {
crate::bail!("vec_dot_q5_1_q8_1: {n} is not divisible by {qk}")
}
let nb = n / qk;
if nb % 2 != 0 {
crate::bail!("vec_dot_q5_1_q8_1: {n}, nb is not divisible by 2")
}
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let qh = LittleEndian::read_u32(&xs.qh);
let mut sumi = 0i32;
for j in 0..Self::BLCK_SIZE / 2 {
let xh_0 = ((qh >> j) << 4) & 0x10;
let xh_1 = (qh >> (j + 12)) & 0x10;
let x0 = (xs.qs[j] as i32 & 0xF) | xh_0 as i32;
let x1 = (xs.qs[j] as i32 >> 4) | xh_1 as i32;
sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32);
}
sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d)
+ f16::to_f32(xs.m) * f16::to_f32(ys.s)
}
Ok(sumf)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q5_1
let qk = Self::BLCK_SIZE;
if ys.len() * qk != xs.len() {
crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,)
}
for (i, ys) in ys.iter_mut().enumerate() {
let xs = &xs[i * qk..(i + 1) * qk];
let mut min = f32::INFINITY;
let mut max = f32::NEG_INFINITY;
for &x in xs.iter() {
min = f32::min(x, min);
max = f32::max(x, max);
}
let d = (max - min) / ((1 << 5) - 1) as f32;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
ys.m = f16::from_f32(min);
let mut qh = 0u32;
for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() {
let x0 = (xs[j] - min) * id;
let x1 = (xs[qk / 2 + j] - min) * id;
let xi0 = (x0 + 0.5) as u8;
let xi1 = (x1 + 0.5) as u8;
*q = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
// get the 5-th bit and store it in qh at the right position
qh |= ((xi0 as u32 & 0x10) >> 4) << j;
qh |= ((xi1 as u32 & 0x10) >> 4) << (j + qk / 2);
}
LittleEndian::write_u32(&mut ys.qh, qh);
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1592
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK5_1 != 0 {
crate::bail!("dequantize_row_q5_1: {k} is not divisible by {QK5_1}");
}
let nb = k / QK5_1;
for i in 0..nb {
let d = xs[i].d.to_f32();
let m = xs[i].m.to_f32();
let qh: u32 = LittleEndian::read_u32(&xs[i].qh);
for j in 0..(QK5_1 / 2) {
let xh_0 = (((qh >> j) << 4) & 0x10) as u8;
let xh_1 = ((qh >> (j + 12)) & 0x10) as u8;
let x0 = (xs[i].qs[j] & 0x0F) | xh_0;
let x1 = (xs[i].qs[j] >> 4) | xh_1;
ys[i * QK5_1 + j] = (x0 as f32) * d + m;
ys[i * QK5_1 + j + QK5_1 / 2] = (x1 as f32) * d + m;
}
}
Ok(())
}
}
impl GgmlType for BlockQ8_0 {
const DTYPE: GgmlDType = GgmlDType::Q8_0;
const BLCK_SIZE: usize = QK8_0;
type VecDotType = BlockQ8_0;
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1619
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK8_0 != 0 {
crate::bail!("dequantize_row_q8_0: {k} is not divisible by {QK8_0}");
}
let nb = k / QK8_0;
for i in 0..nb {
let d = xs[i].d.to_f32();
for j in 0..QK8_0 {
ys[i * QK8_0 + j] = xs[i].qs[j] as f32 * d;
}
}
Ok(())
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q8_0
let k = xs.len();
if k % Self::BLCK_SIZE != 0 {
crate::bail!("{k} is not divisible by {}", Self::BLCK_SIZE);
};
let nb = k / Self::BLCK_SIZE;
if ys.len() != nb {
crate::bail!(
"size mismatch {} {} {}",
xs.len(),
ys.len(),
Self::BLCK_SIZE
)
}
for (i, ys) in ys.iter_mut().enumerate() {
let mut amax = 0f32;
let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE];
for &x in xs.iter() {
amax = amax.max(x.abs())
}
let d = amax / ((1 << 7) - 1) as f32;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
for (y, &x) in ys.qs.iter_mut().zip(xs.iter()) {
*y = f32::round(x * id) as i8
}
}
Ok(())
}
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q8_0_q8_0(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q8_0_q8_0(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q8_0_q8_0(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
let qk = QK8_0;
if n % QK8_0 != 0 {
crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}")
}
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let sum_i = xs
.qs
.iter()
.zip(ys.qs.iter())
.map(|(&x, &y)| x as i32 * y as i32)
.sum::<i32>();
sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d)
}
Ok(sumf)
}
}
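// Worked example of the Q8_0 scheme above (illustrative): for a block whose
// largest magnitude is amax = 2.54, the scale is d = 2.54 / 127 = 0.02, each
// value is stored as round(x / d) in an i8 (so x = 1.0 becomes 50, which
// dequantizes to 50 * 0.02 = 1.0), and the worst-case error is d / 2. The
// generic dot product then needs a single float multiply per block:
// sum_i(qx_i * qy_i) * d_x * d_y.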
impl GgmlType for BlockQ8_1 {
const DTYPE: GgmlDType = GgmlDType::Q8_1;
const BLCK_SIZE: usize = QK8_1;
type VecDotType = BlockQ8_1;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
unimplemented!("no support for vec-dot on Q8_1")
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
// quantize_row_q8_1
let k = xs.len();
if ys.len() * Self::BLCK_SIZE != k {
crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE)
}
for (i, ys) in ys.iter_mut().enumerate() {
let mut amax = 0f32;
let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE];
for &x in xs.iter() {
amax = amax.max(x.abs())
}
let d = amax / ((1 << 7) - 1) as f32;
let id = if d != 0f32 { 1. / d } else { 0. };
ys.d = f16::from_f32(d);
let mut sum = 0i32;
for j in 0..Self::BLCK_SIZE / 2 {
let v0 = xs[j] * id;
let v1 = xs[j + Self::BLCK_SIZE / 2] * id;
ys.qs[j] = f32::round(v0) as i8;
ys.qs[j + Self::BLCK_SIZE / 2] = f32::round(v1) as i8;
sum += ys.qs[j] as i32 + ys.qs[j + Self::BLCK_SIZE / 2] as i32;
}
ys.s = f16::from_f32(sum as f32) * ys.d;
}
Ok(())
}
fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> {
unimplemented!("no support for vec-dot on Q8_1")
}
}
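// Note on the `s` field computed in `from_float` above (illustrative): since
// sum_j m * (d_y * q_j) = m * (d_y * sum_j q_j) = m * s, caching s = d * sum
// lets asymmetric dot products such as `vec_dot_q5_1_q8_1` above fold the
// `min` offset in with a single `m * s` term instead of re-summing the 32
// quantized values.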
impl GgmlType for BlockQ2K {
const DTYPE: GgmlDType = GgmlDType::Q2K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q2k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q2k_q8k(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q2k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}")
}
let mut sumf = 0.0;
for (x, y) in xs.iter().zip(ys.iter()) {
let mut q2: &[_] = &x.qs;
let mut q8: &[_] = &y.qs;
let sc = &x.scales;
let mut summs = 0;
for (bsum, scale) in y.bsums.iter().zip(sc) {
summs += *bsum as i32 * ((scale >> 4) as i32);
}
let dall = y.d * x.d.to_f32();
let dmin = y.d * x.dmin.to_f32();
let mut isum = 0;
let mut is = 0;
for _ in 0..(QK_K / 128) {
let mut shift = 0;
for _ in 0..4 {
let d = (sc[is] & 0xF) as i32;
is += 1;
let mut isuml = 0;
for l in 0..16 {
isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32);
}
isum += d * isuml;
let d = (sc[is] & 0xF) as i32;
is += 1;
isuml = 0;
for l in 16..32 {
isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32);
}
isum += d * isuml;
shift += 2;
// adjust the indexing
q8 = &q8[32..];
}
// adjust the indexing
q2 = &q2[32..];
}
sumf += dall * isum as f32 - dmin * summs as f32;
}
Ok(sumf)
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L279
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
const Q4SCALE: f32 = 15.0;
for (block, x) in group_for_quantization(xs, ys)? {
// Calculate scales and mins.
let mut mins: [f32; QK_K / 16] = [0.0; QK_K / 16];
let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16];
for (j, x_scale_slice) in x.chunks(16).enumerate() {
(scales[j], mins[j]) = make_qkx1_quants(3, 5, x_scale_slice);
}
// get max scale and max min and ensure they are >= 0.0
let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max));
let max_min = mins.iter().fold(0.0, |max, &val| val.max(max));
if max_scale > 0.0 {
let iscale = Q4SCALE / max_scale;
for (j, scale) in scales.iter().enumerate().take(QK_K / 16) {
block.scales[j] = nearest_int(iscale * scale) as u8;
}
block.d = f16::from_f32(max_scale / Q4SCALE);
} else {
for j in 0..QK_K / 16 {
block.scales[j] = 0;
}
block.d = f16::from_f32(0.0);
}
if max_min > 0.0 {
let iscale = Q4SCALE / max_min;
for (j, scale) in block.scales.iter_mut().enumerate() {
let l = nearest_int(iscale * mins[j]) as u8;
*scale |= l << 4;
}
block.dmin = f16::from_f32(max_min / Q4SCALE);
} else {
block.dmin = f16::from_f32(0.0);
}
let mut big_l: [u8; QK_K] = [0; QK_K];
for j in 0..QK_K / 16 {
let d = block.d.to_f32() * (block.scales[j] & 0xF) as f32;
if d == 0.0 {
continue;
}
let dm = block.dmin.to_f32() * (block.scales[j] >> 4) as f32;
for ii in 0..16 {
let ll = nearest_int((x[16 * j + ii] + dm) / d).clamp(0, 3);
big_l[16 * j + ii] = ll as u8;
}
}
for j in (0..QK_K).step_by(128) {
for ll in 0..32 {
block.qs[j / 4 + ll] = big_l[j + ll]
| (big_l[j + ll + 32] << 2)
| (big_l[j + ll + 64] << 4)
| (big_l[j + ll + 96] << 6);
}
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L354
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
for (block, y) in group_for_dequantization(xs, ys)? {
let d = block.d.to_f32();
let min = block.dmin.to_f32();
let mut is = 0;
for (y_block, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) {
// Step by 32 over q.
let mut shift = 0;
let mut y_block_index = 0;
for _j in 0..4 {
let sc = block.scales[is];
is += 1;
let dl = d * (sc & 0xF) as f32;
let ml = min * (sc >> 4) as f32;
for q in &qs[..16] {
let y = dl * ((q >> shift) & 3) as f32 - ml;
y_block[y_block_index] = y;
y_block_index += 1;
}
let sc = block.scales[is];
is += 1;
let dl = d * (sc & 0xF) as f32;
let ml = min * (sc >> 4) as f32;
for q in &qs[16..] {
let y = dl * ((q >> shift) & 3) as f32 - ml;
y_block[y_block_index] = y;
y_block_index += 1;
}
shift += 2;
}
}
}
Ok(())
}
}
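// Packing sketch for the 2-bit layout above (illustrative): each byte of `qs`
// holds four 2-bit values that sit 32 positions apart. Within the 128-value
// chunk starting at j, byte qs[j / 4 + ll] stores big_l[j + ll] in bits 0..1,
// big_l[j + ll + 32] in bits 2..3, big_l[j + ll + 64] in bits 4..5 and
// big_l[j + ll + 96] in bits 6..7; e.g. the values (1, 3, 0, 2) pack to
// 0b10_00_11_01 = 0x8d.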
impl GgmlType for BlockQ3K {
const DTYPE: GgmlDType = GgmlDType::Q3K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q3k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q3k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}")
}
const KMASK1: u32 = 0x03030303;
const KMASK2: u32 = 0x0f0f0f0f;
let mut aux8: [i8; QK_K] = [0; QK_K];
let mut aux16: [i16; 8] = [0; 8];
let mut sums: [f32; 8] = [0.0; 8];
let mut aux32: [i32; 8] = [0; 8];
let mut auxs: [u32; 4] = [0; 4];
for (x, y) in xs.iter().zip(ys.iter()) {
let mut q3: &[u8] = &x.qs;
let hmask: &[u8] = &x.hmask;
let mut q8: &[i8] = &y.qs;
aux32.fill(0);
let mut a = &mut aux8[..];
let mut m = 1;
// Like the GGML original, this is written this way to enable the compiler to vectorize it.
for _ in 0..QK_K / 128 {
a.iter_mut()
.take(32)
.zip(q3)
.for_each(|(a_val, q3_val)| *a_val = (q3_val & 3) as i8);
a.iter_mut()
.take(32)
.zip(hmask)
.for_each(|(a_val, hmask_val)| {
*a_val -= if hmask_val & m != 0 { 0 } else { 4 }
});
a = &mut a[32..];
m <<= 1;
a.iter_mut()
.take(32)
.zip(q3)
.for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 2) & 3) as i8);
a.iter_mut()
.take(32)
.zip(hmask)
.for_each(|(a_val, hmask_val)| {
*a_val -= if hmask_val & m != 0 { 0 } else { 4 }
});
a = &mut a[32..];
m <<= 1;
a.iter_mut()
.take(32)
.zip(q3)
.for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 4) & 3) as i8);
a.iter_mut()
.take(32)
.zip(hmask)
.for_each(|(a_val, hmask_val)| {
*a_val -= if hmask_val & m != 0 { 0 } else { 4 }
});
a = &mut a[32..];
m <<= 1;
a.iter_mut()
.take(32)
.zip(q3)
.for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 6) & 3) as i8);
a.iter_mut()
.take(32)
.zip(hmask)
.for_each(|(a_val, hmask_val)| {
*a_val -= if hmask_val & m != 0 { 0 } else { 4 }
});
a = &mut a[32..];
m <<= 1;
q3 = &q3[32..];
}
a = &mut aux8[..];
LittleEndian::read_u32_into(&x.scales, &mut auxs[0..3]);
let tmp = auxs[2];
auxs[2] = ((auxs[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4);
auxs[3] = ((auxs[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4);
auxs[0] = (auxs[0] & KMASK2) | (((tmp) & KMASK1) << 4);
auxs[1] = (auxs[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4);
for aux in auxs {
for scale in aux.to_le_bytes() {
let scale = i8::from_be_bytes([scale]);
for l in 0..8 {
aux16[l] = q8[l] as i16 * a[l] as i16;
}
for l in 0..8 {
aux32[l] += (scale as i32 - 32) * aux16[l] as i32;
}
q8 = &q8[8..];
a = &mut a[8..];
for l in 0..8 {
aux16[l] = q8[l] as i16 * a[l] as i16;
}
for l in 0..8 {
aux32[l] += (scale as i32 - 32) * aux16[l] as i32;
}
q8 = &q8[8..];
a = &mut a[8..];
}
}
let d = x.d.to_f32() * y.d;
for l in 0..8 {
sums[l] += d * aux32[l] as f32;
}
}
Ok(sums.iter().sum())
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
for (block, x) in group_for_quantization(xs, ys)? {
let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16];
for (j, x_scale_slice) in x.chunks_exact(16).enumerate() {
scales[j] = make_q3_quants(x_scale_slice, 4, true);
}
// Get max scale by absolute value.
let mut max_scale: f32 = 0.0;
for &scale in scales.iter() {
if scale.abs() > max_scale.abs() {
max_scale = scale;
}
}
block.scales.fill(0);
if max_scale != 0.0 {
let iscale = -32.0 / max_scale;
for (j, scale) in scales.iter().enumerate() {
let l_val = nearest_int(iscale * scale);
let l_val = l_val.clamp(-32, 31) + 32;
if j < 8 {
block.scales[j] = (l_val & 0xF) as u8;
} else {
block.scales[j - 8] |= ((l_val & 0xF) << 4) as u8;
}
let l_val = l_val >> 4;
block.scales[j % 4 + 8] |= (l_val << (2 * (j / 4))) as u8;
}
block.d = f16::from_f32(1.0 / iscale);
} else {
block.d = f16::from_f32(0.0);
}
let mut l: [i8; QK_K] = [0; QK_K];
for j in 0..QK_K / 16 {
let sc = if j < 8 {
block.scales[j] & 0xF
} else {
block.scales[j - 8] >> 4
};
let sc = (sc | (((block.scales[8 + j % 4] >> (2 * (j / 4))) & 3) << 4)) as i8 - 32;
let d = block.d.to_f32() * sc as f32;
if d != 0.0 {
for ii in 0..16 {
let l_val = nearest_int(x[16 * j + ii] / d);
l[16 * j + ii] = (l_val.clamp(-4, 3) + 4) as i8;
}
}
}
block.hmask.fill(0);
let mut m = 0;
let mut hm = 1;
for ll in l.iter_mut() {
if *ll > 3 {
block.hmask[m] |= hm;
*ll -= 4;
}
m += 1;
if m == QK_K / 8 {
m = 0;
hm <<= 1;
}
}
for j in (0..QK_K).step_by(128) {
for l_val in 0..32 {
block.qs[j / 4 + l_val] = (l[j + l_val]
| (l[j + l_val + 32] << 2)
| (l[j + l_val + 64] << 4)
| (l[j + l_val + 96] << 6))
as u8;
}
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
const KMASK1: u32 = 0x03030303;
const KMASK2: u32 = 0x0f0f0f0f;
for (block, y) in group_for_dequantization(xs, ys)? {
// Reconstruct the scales.
let mut aux = [0; 4];
LittleEndian::read_u32_into(&block.scales, &mut aux[0..3]);
let tmp = aux[2];
aux[2] = ((aux[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4);
aux[3] = ((aux[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4);
aux[0] = (aux[0] & KMASK2) | (((tmp) & KMASK1) << 4);
aux[1] = (aux[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4);
// Transfer the scales into an i8 array.
let scales: &mut [i8] =
unsafe { std::slice::from_raw_parts_mut(aux.as_mut_ptr() as *mut i8, 16) };
let d_all = block.d.to_f32();
let mut m = 1;
let mut is = 0;
// Dequantize the two 128-element halves of the block.
// Each half consumes 32 bytes of qs.
// Every 16 elements share one scale.
for (y, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) {
let mut shift = 0;
for shift_scoped_y in y.chunks_exact_mut(32) {
for (scale_index, scale_scoped_y) in
shift_scoped_y.chunks_exact_mut(16).enumerate()
{
let dl = d_all * (scales[is] as f32 - 32.0);
for (i, inner_y) in scale_scoped_y.iter_mut().enumerate() {
let new_y = dl
* (((qs[i + 16 * scale_index] >> shift) & 3) as i8
- if (block.hmask[i + 16 * scale_index] & m) == 0 {
4
} else {
0
}) as f32;
*inner_y = new_y;
}
// 16 block finished => advance scale index
is += 1;
}
// 32 block finished => increase shift and m
shift += 2;
m <<= 1;
}
}
}
Ok(())
}
}
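// Scale-packing sketch for Q3K (illustrative): each of the 16 sub-blocks gets
// a 6-bit scale stored across the 12-byte `scales` array. The low 4 bits live
// in the nibbles of scales[0..8] (low nibble for sub-blocks 0..8, high nibble
// for 8..16) and the top 2 bits go into scales[8..12] at bit offset
// 2 * (j / 4). E.g. sub-block j = 5 with the 6-bit scale 0b10_1101 stores
// 0b1101 in the low nibble of scales[5] and 0b10 at bits 2..3 of
// scales[5 % 4 + 8] = scales[9].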
impl GgmlType for BlockQ4K {
const DTYPE: GgmlDType = GgmlDType::Q4K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q4k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q4k_q8k(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q4k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}")
}
const KMASK1: u32 = 0x3f3f3f3f;
const KMASK2: u32 = 0x0f0f0f0f;
const KMASK3: u32 = 0x03030303;
let mut utmp: [u32; 4] = [0; 4];
let mut scales: [u8; 8] = [0; 8];
let mut mins: [u8; 8] = [0; 8];
let mut aux8: [i8; QK_K] = [0; QK_K];
let mut aux16: [i16; 8] = [0; 8];
let mut sums: [f32; 8] = [0.0; 8];
let mut aux32: [i32; 8] = [0; 8];
let mut sumf = 0.0;
for (y, x) in ys.iter().zip(xs.iter()) {
let q4 = &x.qs;
let q8 = &y.qs;
aux32.fill(0);
let mut a = &mut aux8[..];
let mut q4 = &q4[..];
for _ in 0..QK_K / 64 {
for l in 0..32 {
a[l] = (q4[l] & 0xF) as i8;
}
a = &mut a[32..];
for l in 0..32 {
a[l] = (q4[l] >> 4) as i8;
}
a = &mut a[32..];
q4 = &q4[32..];
}
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]);
utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4);
let uaux = utmp[1] & KMASK1;
utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4);
utmp[2] = uaux;
utmp[0] &= KMASK1;
// Extract scales and mins.
LittleEndian::write_u32_into(&utmp[0..2], &mut scales);
LittleEndian::write_u32_into(&utmp[2..4], &mut mins);
let mut sumi = 0;
for j in 0..QK_K / 16 {
sumi += y.bsums[j] as i32 * mins[j / 2] as i32;
}
let mut a = &mut aux8[..];
let mut q8 = &q8[..];
for scale in scales {
let scale = scale as i32;
for _ in 0..4 {
for l in 0..8 {
aux16[l] = q8[l] as i16 * a[l] as i16;
}
for l in 0..8 {
aux32[l] += scale * aux16[l] as i32;
}
q8 = &q8[8..];
a = &mut a[8..];
}
}
let d = x.d.to_f32() * y.d;
for l in 0..8 {
sums[l] += d * aux32[l] as f32;
}
let dmin = x.dmin.to_f32() * y.d;
sumf -= dmin * sumi as f32;
}
Ok(sumf + sums.iter().sum::<f32>())
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
for (block, x) in group_for_quantization(xs, ys)? {
let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32];
let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32];
for (j, x_scale_slice) in x.chunks_exact(32).enumerate() {
(scales[j], mins[j]) = make_qkx1_quants(15, 5, x_scale_slice);
}
// get max scale and max min and ensure they are >= 0.0
let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max));
let max_min = mins.iter().fold(0.0, |max, &val| val.max(max));
let inv_scale = if max_scale > 0.0 {
63.0 / max_scale
} else {
0.0
};
let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 };
for j in 0..QK_K / 32 {
let ls = nearest_int(inv_scale * scales[j]).min(63) as u8;
let lm = nearest_int(inv_min * mins[j]).min(63) as u8;
if j < 4 {
block.scales[j] = ls;
block.scales[j + 4] = lm;
} else {
block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4);
block.scales[j - 4] |= (ls >> 4) << 6;
block.scales[j] |= (lm >> 4) << 6;
}
}
block.d = f16::from_f32(max_scale / 63.0);
block.dmin = f16::from_f32(max_min / 63.0);
let mut l: [u8; QK_K] = [0; QK_K];
for j in 0..QK_K / 32 {
let (sc, m) = get_scale_min_k4(j, &block.scales);
let d = block.d.to_f32() * sc as f32;
if d != 0.0 {
let dm = block.dmin.to_f32() * m as f32;
for ii in 0..32 {
let l_val = nearest_int((x[32 * j + ii] + dm) / d);
l[32 * j + ii] = l_val.clamp(0, 15) as u8;
}
}
}
let q = &mut block.qs;
for j in (0..QK_K).step_by(64) {
for l_val in 0..32 {
let offset_index = (j / 64) * 32 + l_val;
q[offset_index] = l[j + l_val] | (l[j + l_val + 32] << 4);
}
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L735
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
for (block, y) in group_for_dequantization(xs, ys)? {
let d = block.d.to_f32();
let min = block.dmin.to_f32();
let q = &block.qs;
let mut is = 0;
let mut ys_index = 0;
for j in (0..QK_K).step_by(64) {
let q = &q[j / 2..j / 2 + 32];
let (sc, m) = get_scale_min_k4(is, &block.scales);
let d1 = d * sc as f32;
let m1 = min * m as f32;
let (sc, m) = get_scale_min_k4(is + 1, &block.scales);
let d2 = d * sc as f32;
let m2 = min * m as f32;
for q in q {
y[ys_index] = d1 * (q & 0xF) as f32 - m1;
ys_index += 1;
}
for q in q {
y[ys_index] = d2 * (q >> 4) as f32 - m2;
ys_index += 1;
}
is += 2;
}
}
Ok(())
}
}
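// Scale/min packing sketch for Q4K (illustrative): each of the 8 sub-blocks
// of 32 values carries a 6-bit scale and a 6-bit min in the 12-byte `scales`
// array. For j < 4 the 6-bit values sit in the low bits of scales[j] (scale)
// and scales[j + 4] (min); for j >= 4 the low nibbles go into scales[j + 4]
// and the top 2 bits spill into bits 6..7 of scales[j - 4] (scale) and
// scales[j] (min). `get_scale_min_k4` reverses this layout.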
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928
impl GgmlType for BlockQ5K {
const DTYPE: GgmlDType = GgmlDType::Q5K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q5k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q5k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}")
}
const KMASK1: u32 = 0x3f3f3f3f;
const KMASK2: u32 = 0x0f0f0f0f;
const KMASK3: u32 = 0x03030303;
let mut utmp: [u32; 4] = [0; 4];
let mut scales: [u8; 8] = [0; 8];
let mut mins: [u8; 8] = [0; 8];
let mut aux8: [i8; QK_K] = [0; QK_K];
let mut aux16: [i16; 8] = [0; 8];
let mut sums: [f32; 8] = [0.0; 8];
let mut aux32: [i32; 8] = [0; 8];
let mut sumf = 0.0;
for (y, x) in ys.iter().zip(xs.iter()) {
let q5 = &x.qs;
let hm = &x.qh;
let q8 = &y.qs;
aux32.fill(0);
let mut a = &mut aux8[..];
let mut q5 = &q5[..];
let mut m = 1u8;
for _ in 0..QK_K / 64 {
for l in 0..32 {
a[l] = (q5[l] & 0xF) as i8;
a[l] += if hm[l] & m != 0 { 16 } else { 0 };
}
a = &mut a[32..];
m <<= 1;
for l in 0..32 {
a[l] = (q5[l] >> 4) as i8;
a[l] += if hm[l] & m != 0 { 16 } else { 0 };
}
a = &mut a[32..];
m <<= 1;
q5 = &q5[32..];
}
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]);
utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4);
let uaux = utmp[1] & KMASK1;
utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4);
utmp[2] = uaux;
utmp[0] &= KMASK1;
// Extract scales and mins.
LittleEndian::write_u32_into(&utmp[0..2], &mut scales);
LittleEndian::write_u32_into(&utmp[2..4], &mut mins);
let mut sumi = 0;
for j in 0..QK_K / 16 {
sumi += y.bsums[j] as i32 * mins[j / 2] as i32;
}
let mut a = &mut aux8[..];
let mut q8 = &q8[..];
for scale in scales {
let scale = scale as i32;
for _ in 0..4 {
for l in 0..8 {
aux16[l] = q8[l] as i16 * a[l] as i16;
}
for l in 0..8 {
aux32[l] += scale * aux16[l] as i32;
}
q8 = &q8[8..];
a = &mut a[8..];
}
}
let d = x.d.to_f32() * y.d;
for l in 0..8 {
sums[l] += d * aux32[l] as f32;
}
let dmin = x.dmin.to_f32() * y.d;
sumf -= dmin * sumi as f32;
}
Ok(sumf + sums.iter().sum::<f32>())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L793
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
for (block, x) in group_for_quantization(xs, ys)? {
let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32];
let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32];
for (j, x_scale_slice) in x.chunks_exact(32).enumerate() {
(scales[j], mins[j]) = make_qkx1_quants(31, 5, x_scale_slice);
}
// get max scale and max min and ensure they are >= 0.0
let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max));
let max_min = mins.iter().fold(0.0, |max, &val| val.max(max));
let inv_scale = if max_scale > 0.0 {
63.0 / max_scale
} else {
0.0
};
let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 };
for j in 0..QK_K / 32 {
let ls = nearest_int(inv_scale * scales[j]).min(63) as u8;
let lm = nearest_int(inv_min * mins[j]).min(63) as u8;
if j < 4 {
block.scales[j] = ls;
block.scales[j + 4] = lm;
} else {
block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4);
block.scales[j - 4] |= (ls >> 4) << 6;
block.scales[j] |= (lm >> 4) << 6;
}
}
block.d = f16::from_f32(max_scale / 63.0);
block.dmin = f16::from_f32(max_min / 63.0);
let mut l: [u8; QK_K] = [0; QK_K];
for j in 0..QK_K / 32 {
let (sc, m) = get_scale_min_k4(j, &block.scales);
let d = block.d.to_f32() * sc as f32;
if d == 0.0 {
continue;
}
let dm = block.dmin.to_f32() * m as f32;
for ii in 0..32 {
let ll = nearest_int((x[32 * j + ii] + dm) / d);
l[32 * j + ii] = ll.clamp(0, 31) as u8;
}
}
let qh = &mut block.qh;
let ql = &mut block.qs;
qh.fill(0);
let mut m1 = 1;
let mut m2 = 2;
for n in (0..QK_K).step_by(64) {
let offset = (n / 64) * 32;
for j in 0..32 {
let mut l1 = l[n + j];
if l1 > 15 {
l1 -= 16;
qh[j] |= m1;
}
let mut l2 = l[n + j + 32];
if l2 > 15 {
l2 -= 16;
qh[j] |= m2;
}
ql[offset + j] = l1 | (l2 << 4);
}
m1 <<= 2;
m2 <<= 2;
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
for (block, y) in group_for_dequantization(xs, ys)? {
let d = block.d.to_f32();
let min = block.dmin.to_f32();
let ql = &block.qs;
let qh = &block.qh;
let mut is = 0;
let mut u1 = 1;
let mut u2 = 2;
let mut ys_index = 0;
for j in (0..QK_K).step_by(64) {
let ql = &ql[j / 2..j / 2 + 32];
let (sc, m) = get_scale_min_k4(is, &block.scales);
let d1 = d * sc as f32;
let m1 = min * m as f32;
let (sc, m) = get_scale_min_k4(is + 1, &block.scales);
let d2 = d * sc as f32;
let m2 = min * m as f32;
for (ql, qh) in ql.iter().zip(qh) {
let to_add = if qh & u1 != 0 { 16f32 } else { 0f32 };
y[ys_index] = d1 * ((ql & 0xF) as f32 + to_add) - m1;
ys_index += 1;
}
for (ql, qh) in ql.iter().zip(qh) {
let to_add = if qh & u2 != 0 { 16f32 } else { 0f32 };
y[ys_index] = d2 * ((ql >> 4) as f32 + to_add) - m2;
ys_index += 1;
}
is += 2;
u1 <<= 2;
u2 <<= 2;
}
}
Ok(())
}
}
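// High-bit packing sketch for Q5K (illustrative): the low 4 bits of each
// value go into `qs` nibbles and the 5th bit into `qh`. For the 64-value
// chunk with index c = n / 64, bit 2 * c of qh[j] holds the 5th bit of
// l[n + j] and bit 2 * c + 1 that of l[n + j + 32] -- exactly the bits the
// u1/u2 masks in `to_float` above read back.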
impl GgmlType for BlockQ6K {
const DTYPE: GgmlDType = GgmlDType::Q6K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q6k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q6k_q8k(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q6k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}")
}
let mut aux8 = [0i8; QK_K];
let mut aux16 = [0i16; 8];
let mut sums = [0f32; 8];
let mut aux32 = [0f32; 8];
for (x, y) in xs.iter().zip(ys.iter()) {
let q4 = &x.ql;
let qh = &x.qh;
let q8 = &y.qs;
aux32.fill(0f32);
for j in (0..QK_K).step_by(128) {
let aux8 = &mut aux8[j..];
let q4 = &q4[j / 2..];
let qh = &qh[j / 4..];
for l in 0..32 {
aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8;
aux8[l + 32] =
(((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8;
aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8;
aux8[l + 96] =
(((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8;
}
}
for (j, &scale) in x.scales.iter().enumerate() {
let scale = scale as f32;
let q8 = &q8[16 * j..];
let aux8 = &aux8[16 * j..];
for l in 0..8 {
aux16[l] = q8[l] as i16 * aux8[l] as i16;
}
for l in 0..8 {
aux32[l] += scale * aux16[l] as f32
}
let q8 = &q8[8..];
let aux8 = &aux8[8..];
for l in 0..8 {
aux16[l] = q8[l] as i16 * aux8[l] as i16;
}
for l in 0..8 {
aux32[l] += scale * aux16[l] as f32
}
}
let d = x.d.to_f32() * y.d;
for (sum, &a) in sums.iter_mut().zip(aux32.iter()) {
*sum += a * d;
}
}
Ok(sums.iter().sum())
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
if xs.len() != ys.len() * Self::BLCK_SIZE {
crate::bail!(
"quantize_row_q6k: size mismatch {} {} {}",
xs.len(),
ys.len(),
Self::BLCK_SIZE
)
}
let mut l = [0i8; QK_K];
let mut scales = [0f32; QK_K / 16];
let mut x = xs.as_ptr();
let l = l.as_mut_ptr();
unsafe {
for y in ys.iter_mut() {
let mut max_scale = 0f32;
let mut max_abs_scale = 0f32;
for (ib, scale_) in scales.iter_mut().enumerate() {
let scale = make_qx_quants(16, 32, x.add(16 * ib), l.add(16 * ib), 1);
*scale_ = scale;
let abs_scale = scale.abs();
if abs_scale > max_abs_scale {
max_abs_scale = abs_scale;
max_scale = scale
}
}
let iscale = -128f32 / max_scale;
y.d = f16::from_f32(1.0 / iscale);
for (y_scale, scale) in y.scales.iter_mut().zip(scales.iter()) {
*y_scale = nearest_int(iscale * scale).min(127) as i8
}
for (j, &y_scale) in y.scales.iter().enumerate() {
let d = y.d.to_f32() * y_scale as f32;
if d == 0. {
continue;
}
for ii in 0..16 {
let ll = nearest_int(*x.add(16 * j + ii) / d).clamp(-32, 31);
*l.add(16 * j + ii) = (ll + 32) as i8
}
}
let mut ql = y.ql.as_mut_ptr();
let mut qh = y.qh.as_mut_ptr();
for j in (0..QK_K).step_by(128) {
for l_idx in 0..32 {
let q1 = *l.add(j + l_idx) & 0xF;
let q2 = *l.add(j + l_idx + 32) & 0xF;
let q3 = *l.add(j + l_idx + 64) & 0xF;
let q4 = *l.add(j + l_idx + 96) & 0xF;
*ql.add(l_idx) = (q1 | (q3 << 4)) as u8;
*ql.add(l_idx + 32) = (q2 | (q4 << 4)) as u8;
*qh.add(l_idx) = ((*l.add(j + l_idx) >> 4)
| ((*l.add(j + l_idx + 32) >> 4) << 2)
| ((*l.add(j + l_idx + 64) >> 4) << 4)
| ((*l.add(j + l_idx + 96) >> 4) << 6))
as u8;
}
ql = ql.add(64);
qh = qh.add(32);
}
x = x.add(QK_K)
}
}
Ok(())
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L1067
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK_K != 0 {
crate::bail!("dequantize_row_q6k: {k} is not divisible by {QK_K}")
}
for (idx_x, x) in xs.iter().enumerate() {
let d = x.d.to_f32();
let ql = &x.ql;
let qh = &x.qh;
let sc = &x.scales;
for n in (0..QK_K).step_by(128) {
let idx = n / 128;
let ys = &mut ys[idx_x * QK_K + n..];
let sc = &sc[8 * idx..];
let ql = &ql[64 * idx..];
let qh = &qh[32 * idx..];
for l in 0..32 {
let is = l / 16;
let q1 = ((ql[l] & 0xF) | ((qh[l] & 3) << 4)) as i8 - 32;
let q2 = ((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i8 - 32;
let q3 = ((ql[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i8 - 32;
let q4 = ((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i8 - 32;
ys[l] = d * sc[is] as f32 * q1 as f32;
ys[l + 32] = d * sc[is + 2] as f32 * q2 as f32;
ys[l + 64] = d * sc[is + 4] as f32 * q3 as f32;
ys[l + 96] = d * sc[is + 6] as f32 * q4 as f32;
}
}
}
Ok(())
}
}
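// Worked example of the Q6K layout above (illustrative): a stored value is
// reassembled from 4 low bits in `ql` and 2 high bits in `qh`, then recentred
// by -32. E.g. the ql nibble 0b1010 with qh bits 0b01 gives
// (0b01_1010 = 26) - 32 = -6, which is then scaled by d * scales[is].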
impl GgmlType for BlockQ8K {
const DTYPE: GgmlDType = GgmlDType::Q8K;
const BLCK_SIZE: usize = QK_K;
type VecDotType = BlockQ8K;
#[allow(unreachable_code)]
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
#[cfg(target_feature = "avx2")]
return super::avx::vec_dot_q8k_q8k(n, xs, ys);
#[cfg(target_feature = "neon")]
return super::neon::vec_dot_q8k_q8k(n, xs, ys);
#[cfg(target_feature = "simd128")]
return super::simd128::vec_dot_q8k_q8k(n, xs, ys);
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
let qk = QK_K;
if n % QK_K != 0 {
crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}")
}
// Generic implementation.
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
let sum_i = xs
.qs
.iter()
.zip(ys.qs.iter())
.map(|(&x, &y)| x as i32 * y as i32)
.sum::<i32>();
sumf += sum_i as f32 * xs.d * ys.d
}
Ok(sumf)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
let k = xs.len();
if k % QK_K != 0 {
crate::bail!("quantize_row_q8k: {k} is not divisible by {QK_K}")
}
for (i, y) in ys.iter_mut().enumerate() {
let mut max = 0f32;
let mut amax = 0f32;
let xs = &xs[i * QK_K..(i + 1) * QK_K];
for &x in xs.iter() {
if amax < x.abs() {
amax = x.abs();
max = x;
}
}
if amax == 0f32 {
y.d = 0f32;
y.qs.fill(0)
} else {
let iscale = -128f32 / max;
for (j, q) in y.qs.iter_mut().enumerate() {
// ggml uses nearest_int with bit magic here, maybe we want the same
// but we would have to test and benchmark it.
let v = (iscale * xs[j]).round();
*q = v.min(127.) as i8
}
for j in 0..QK_K / 16 {
let mut sum = 0i32;
for ii in 0..16 {
sum += y.qs[j * 16 + ii] as i32
}
y.bsums[j] = sum as i16
}
y.d = 1.0 / iscale
}
}
Ok(())
}
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
let k = ys.len();
if k % QK_K != 0 {
crate::bail!("dequantize_row_q8k: {k} is not divisible by {QK_K}")
}
for (i, x) in xs.iter().enumerate() {
for (j, &q) in x.qs.iter().enumerate() {
ys[i * QK_K + j] = x.d * q as f32
}
}
Ok(())
}
}
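// Note on `bsums` above (illustrative): bsums[j] caches the sum of each group
// of 16 quantized values, so the K-quant dot products above (e.g. the
// `y.bsums[j] as i32 * mins[j / 2] as i32` terms in Q4K/Q5K and the
// `bsum * (scale >> 4)` term in Q2K) can apply per-sub-block min offsets
// without walking the 256 quantized values a second time.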
// https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L10605
pub fn matmul<T: GgmlType>(
mkn: (usize, usize, usize),
lhs: &[f32],
rhs_t: &[T],
dst: &mut [f32],
) -> Result<()> {
let (m, k, n) = mkn;
if m * k != lhs.len() {
crate::bail!("unexpected lhs length {} {mkn:?}", lhs.len());
}
let k_in_lhs_blocks = k.div_ceil(T::BLCK_SIZE);
let k_in_rhs_blocks = k.div_ceil(T::VecDotType::BLCK_SIZE);
// TODO: Do not make this copy if the DotType is f32.
// TODO: Pre-allocate this.
let mut lhs_b = vec![T::VecDotType::zeros(); m * k_in_lhs_blocks];
for row_idx in 0..m {
let lhs_b = &mut lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks];
let lhs = &lhs[row_idx * k..(row_idx + 1) * k];
T::VecDotType::from_float(lhs, lhs_b)?
}
let lhs_b = lhs_b.as_slice();
for row_idx in 0..m {
let lhs_row = &lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks];
let dst_row = &mut dst[row_idx * n..(row_idx + 1) * n];
let result: Result<Vec<_>> = dst_row
.into_par_iter()
.enumerate()
.with_min_len(128)
.with_max_len(512)
.map(|(col_idx, dst)| {
let rhs_col = &rhs_t[col_idx * k_in_rhs_blocks..(col_idx + 1) * k_in_rhs_blocks];
T::vec_dot(k, rhs_col, lhs_row).map(|value| *dst = value)
})
.collect();
result?;
}
Ok(())
}
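// Usage sketch for `matmul` (illustrative, not part of the original file):
// multiply an f32 activation matrix by a transposed, Q8_0-quantized weight
// matrix. The sizes, values and tolerance are this sketch's assumptions; the
// only requirement from the code above is that k is a multiple of the block
// size (32 for Q8_0).
#[cfg(test)]
mod quantized_matmul_sketch {
    use super::*;

    #[test]
    fn q8_0_matmul() -> Result<()> {
        let (m, k, n) = (2, 64, 3);
        let lhs: Vec<f32> = (0..m * k).map(|i| (i % 11) as f32 * 0.1 - 0.5).collect();
        // rhs_t is laid out row-major over output columns: n rows of k values.
        let rhs: Vec<f32> = (0..n * k).map(|i| (i % 7) as f32 * 0.25 - 0.75).collect();
        let mut rhs_t = vec![BlockQ8_0::zeros(); n * k / QK8_0];
        BlockQ8_0::from_float(&rhs, &mut rhs_t)?;
        let mut dst = vec![0f32; m * n];
        matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
        // Compare against a plain f32 reference; the tolerance is a loose
        // worst-case bound for the 8-bit quantization of both operands.
        for row in 0..m {
            for col in 0..n {
                let reference: f32 = (0..k)
                    .map(|i| lhs[row * k + i] * rhs[col * k + i])
                    .sum();
                assert!((dst[row * n + col] - reference).abs() < 0.3);
            }
        }
        Ok(())
    }
}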
impl GgmlType for f32 {
const DTYPE: GgmlDType = GgmlDType::F32;
const BLCK_SIZE: usize = 1;
type VecDotType = f32;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if xs.len() < n {
crate::bail!("size mismatch {} < {n}", xs.len())
}
if ys.len() < n {
crate::bail!("size mismatch {} < {n}", ys.len())
}
let mut res = 0f32;
unsafe { crate::cpu::vec_dot_f32(xs.as_ptr(), ys.as_ptr(), &mut res, n) };
Ok(res)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
ys.copy_from_slice(xs);
Ok(())
}
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
ys.copy_from_slice(xs);
Ok(())
}
}
impl GgmlType for f16 {
const DTYPE: GgmlDType = GgmlDType::F16;
const BLCK_SIZE: usize = 1;
type VecDotType = f16;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if xs.len() < n {
crate::bail!("size mismatch {} < {n}", xs.len())
}
if ys.len() < n {
crate::bail!("size mismatch {} < {n}", ys.len())
}
let mut res = 0f32;
unsafe { crate::cpu::vec_dot_f16(xs.as_ptr(), ys.as_ptr(), &mut res, n) };
Ok(res)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
// TODO: vectorize
for (x, y) in xs.iter().zip(ys.iter_mut()) {
*y = f16::from_f32(*x)
}
Ok(())
}
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
// TODO: vectorize
for (x, y) in xs.iter().zip(ys.iter_mut()) {
*y = x.to_f32()
}
Ok(())
}
}
impl GgmlType for bf16 {
const DTYPE: GgmlDType = GgmlDType::BF16;
const BLCK_SIZE: usize = 1;
type VecDotType = bf16;
fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
Self::vec_dot_unopt(n, xs, ys)
}
fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
if xs.len() < n {
crate::bail!("size mismatch {} < {n}", xs.len())
}
if ys.len() < n {
crate::bail!("size mismatch {} < {n}", ys.len())
}
let mut res = 0f32;
unsafe { crate::cpu::vec_dot_bf16(xs.as_ptr(), ys.as_ptr(), &mut res, n) };
Ok(res)
}
fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
// TODO: vectorize
for (x, y) in xs.iter().zip(ys.iter_mut()) {
*y = bf16::from_f32(*x)
}
Ok(())
}
fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
if xs.len() != ys.len() {
crate::bail!("size mismatch {} {}", xs.len(), ys.len());
}
// TODO: vectorize
for (x, y) in xs.iter().zip(ys.iter_mut()) {
*y = x.to_f32()
}
Ok(())
}
}
| candle/candle-core/src/quantized/k_quants.rs/0 | {
"file_path": "candle/candle-core/src/quantized/k_quants.rs",
"repo_id": "candle",
"token_count": 43384
} | 31 |
//! Useful functions for checking features.
use std::str::FromStr;
pub fn get_num_threads() -> usize {
// Respond to the same environment variable as rayon.
match std::env::var("RAYON_NUM_THREADS")
.ok()
.and_then(|s| usize::from_str(&s).ok())
{
Some(x) if x > 0 => x,
Some(_) | None => num_cpus::get(),
}
}
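// Usage sketch (illustrative): pinning the thread count for reproducible
// benchmarks, via the same environment variable that rayon honours:
//     std::env::set_var("RAYON_NUM_THREADS", "4");
//     assert_eq!(get_num_threads(), 4);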
pub fn has_accelerate() -> bool {
cfg!(feature = "accelerate")
}
pub fn has_mkl() -> bool {
cfg!(feature = "mkl")
}
pub fn cuda_is_available() -> bool {
cfg!(feature = "cuda")
}
pub fn metal_is_available() -> bool {
cfg!(feature = "metal")
}
pub fn with_avx() -> bool {
cfg!(target_feature = "avx2")
}
pub fn with_neon() -> bool {
cfg!(target_feature = "neon")
}
pub fn with_simd128() -> bool {
cfg!(target_feature = "simd128")
}
pub fn with_f16c() -> bool {
cfg!(target_feature = "f16c")
}
| candle/candle-core/src/utils.rs/0 | {
"file_path": "candle/candle-core/src/utils.rs",
"repo_id": "candle",
"token_count": 399
} | 32 |
use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor, D};
use float8::F8E4M3;
fn zeros(device: &Device) -> Result<()> {
let tensor = Tensor::zeros((5, 2), DType::F32, device)?;
let (dim1, dim2) = tensor.dims2()?;
assert_eq!(dim1, 5);
assert_eq!(dim2, 2);
Ok(())
}
fn ones(device: &Device) -> Result<()> {
assert_eq!(
Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?,
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
);
if !device.is_metal() {
assert_eq!(
Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?,
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
);
}
assert_eq!(
Tensor::ones((2, 3), DType::F16, device)?.to_vec2::<half::f16>()?,
[
[
half::f16::from_f32(1.0),
half::f16::from_f32(1.0),
half::f16::from_f32(1.0)
],
[
half::f16::from_f32(1.0),
half::f16::from_f32(1.0),
half::f16::from_f32(1.0)
]
],
);
assert_eq!(
Tensor::ones((2, 3), DType::BF16, device)?.to_vec2::<half::bf16>()?,
[
[
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0)
],
[
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0)
]
],
);
if !device.is_metal() {
assert_eq!(
Tensor::ones((2, 3), DType::F8E4M3, device)?.to_vec2::<F8E4M3>()?,
[
[
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.)
],
[
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.)
]
],
);
}
Ok(())
}
fn full(device: &Device) -> Result<()> {
let tensor = Tensor::zeros((3, 4), DType::U32, device)?;
tensor.const_set(42u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 42, 42], [42, 42, 42, 42], [42, 42, 42, 42]]
);
tensor.i((.., 2))?.const_set(1337u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 1337, 42], [42, 42, 1337, 42], [42, 42, 1337, 42]]
);
tensor.i((2, ..))?.const_set(1u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 1337, 42], [42, 42, 1337, 42], [1, 1, 1, 1]]
);
Ok(())
}
fn const_set(device: &Device) -> Result<()> {
assert_eq!(
Tensor::full(42u32, (2, 3), device)?.to_vec2::<u32>()?,
[[42, 42, 42], [42, 42, 42]],
);
Ok(())
}
fn arange(device: &Device) -> Result<()> {
assert_eq!(
Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?,
[0, 1, 2, 3, 4],
);
assert_eq!(
Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?,
[0, 2, 4],
);
assert_eq!(
Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?,
[0, 3],
);
assert_eq!(
Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?,
[5, 4, 3, 2, 1],
);
if !device.is_metal() {
assert_eq!(
Tensor::arange_step(
F8E4M3::from_f32(0.),
F8E4M3::from_f32(5.),
F8E4M3::from_f32(2.),
device
)?
.to_vec1::<F8E4M3>()?,
[
F8E4M3::from_f32(0.),
F8E4M3::from_f32(2.),
F8E4M3::from_f32(4.),
],
);
}
Ok(())
}
fn add_mul(device: &Device) -> Result<()> {
let tensor = Tensor::new(&[3f32, 1., 4.], device)?;
let dim1 = tensor.dims1()?;
assert_eq!(dim1, 3);
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [3., 1., 4.]);
let tensor = Tensor::add(&tensor, &tensor)?;
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [6., 2., 8.]);
let tensor = Tensor::mul(&tensor, &tensor)?;
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [36., 4., 64.]);
Ok(())
}
fn tensor_2d(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let dims = tensor.dims2()?;
assert_eq!(dims, (2, 5));
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content, data);
Ok(())
}
fn clamp(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let tensor = tensor.clamp(1.5, 6.2)?;
assert_eq!(
tensor.to_vec2::<f32>()?,
[[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]],
);
Ok(())
}
fn asort(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1.1, 5.], [2.1, 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let indexes = tensor.arg_sort_last_dim(true)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]],
);
let indexes = tensor.arg_sort_last_dim(false)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]],
);
let (sorted, indexes) = tensor.sort_last_dim(true)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]],
);
assert_eq!(
sorted.to_vec2::<f32>()?,
[[1.0, 1.1, 3.0, 4.0, 5.0], [1.0, 2.0, 2.1, 7.0, 8.0]]
);
let (sorted, indexes) = tensor.sort_last_dim(false)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]],
);
assert_eq!(
sorted.to_vec2::<f32>()?,
[[5.0, 4.0, 3.0, 1.1, 1.0], [8.0, 7.0, 2.1, 2.0, 1.0]]
);
Ok(())
}
fn unary_op(device: &Device) -> Result<()> {
let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
test_utils::to_vec2_round(&tensor.gelu()?, 4)?,
[
[-0.0036, 0.8412, 3.9999, -0.046, 0.3457],
[2.6911, -0.0647, -0.1091, 1.7353, 2.7933]
]
);
let t_f16 = tensor.to_dtype(DType::F16)?.gelu()?.to_dtype(DType::F32)?;
let max_diff = (tensor.gelu()? - t_f16)?.flatten_all()?.max(0)?;
assert!(max_diff.to_vec0::<f32>()? < 5e-3);
assert_eq!(
test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?,
[
[-0.004, 0.8413, 3.9999, -0.046, 0.3457],
[2.6906, -0.0647, -0.1091, 1.7353, 2.7928]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.erf()?, 4)?,
[
[-1.0, 0.8427, 1.0, -0.1125, 0.5205],
[0.9999, -0.9891, -0.3079, 0.9891, 0.9999]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.silu()?, 4)?,
[
[-0.1423, 0.7311, 3.9281, -0.0475, 0.3112],
[2.53, -0.2553, -0.1205, 1.5447, 2.6395]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.ceil()?, 4)?,
[[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.floor()?, 4)?,
[[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.round()?, 4)?,
[[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]]
);
let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?;
assert_eq!(
test_utils::to_vec1_round(&tensor.round_to(2)?, 4)?,
[2997.92, 314.16]
);
assert_eq!(
test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?,
[3000.0, 300.]
);
let tensor = Tensor::new(
&[-1.01f32, -0.9, -0.1, 0.0, -0.0, 0.1, 0.9, 1.0, 1.1],
device,
)?;
assert_eq!(
tensor.sign()?.to_vec1::<f32>()?,
[-1., -1., -1., 0., 0., 1., 1., 1., 1.]
);
let tensor = Tensor::new(&[-1.0f32, 0., -2., 3.], device)?;
let y = tensor.elu(2.)?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-1.2642, 0.0000, -1.7293, 3.0000]
);
// This test failed on metal prior to the following PR:
// https://github.com/huggingface/candle/pull/2490
let y = tensor.reshape((2, 2))?.t()?.elu(2.)?.flatten_all()?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-1.2642, -1.7293, 0.0000, 3.0000]
);
Ok(())
}
fn binary_op(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor1 = Tensor::new(data, device)?;
let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]];
let tensor2 = Tensor::new(data2, device)?;
let tensor = (&tensor1 + (&tensor1 * &tensor1)? / (&tensor1 + &tensor2))?;
let dims = tensor.dims2()?;
assert_eq!(dims, (2, 5));
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]);
assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]);
#[allow(clippy::eq_op)]
let tensor = (&tensor - &tensor)?;
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content[0], [0., 0., 0., 0., 0.]);
let min = tensor1.minimum(&(&tensor2 * 0.5)?)?;
let max = tensor1.maximum(&(&tensor2 * 0.5)?)?;
assert_eq!(
min.to_vec2::<f32>()?,
[[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]],
);
assert_eq!(
max.to_vec2::<f32>()?,
[[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]]
);
Ok(())
}
fn transpose(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?.t()?;
let dims = tensor.dims2()?;
assert_eq!(dims, (5, 2));
assert_eq!(
tensor.to_vec2::<f32>()?,
&[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]]
);
assert_eq!(tensor.t()?.to_vec2::<f32>()?, data);
assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data);
assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data);
Ok(())
}
fn var(device: &Device) -> Result<()> {
// Values taken from https://pytorch.org/docs/stable/generated/torch.var.html
let data = &[
[0.2035f32, 1.2959, 1.8101, -0.4644],
[1.5027, -0.3270, 0.5905, 0.6538],
[-1.5745, 1.3330, -0.5596, -0.6548],
[0.1264, -0.5080, 1.6420, 0.1992],
];
let tensor = Tensor::new(data, device)?;
assert_eq!(
test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?,
&[[1.0631], [0.559], [1.4893], [0.8258]]
);
Ok(())
}
fn sum(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.sum_keepdim(2)?.to_vec3::<u32>()?,
&[[[8], [15]], [[10], [18]]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
&[[[5, 2, 11], [9, 7, 17]]],
);
assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],);
assert_eq!(
tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?,
&[[[8], [15]], [[10], [18]]]
);
assert_eq!(
tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?,
&[[[8 + 15]], [[10 + 18]]]
);
let data: Vec<u32> = (0..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]);
let tensor = tensor.reshape((2000, 2))?;
assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
assert_eq!(
tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[3998000, 4000000]]
);
// Make the tensor non-contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
assert_eq!(
tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[3998000, 4000000]]
);
let t1 = tensor.reshape((200, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(0)?
.sum_keepdim(2)?
.sum_keepdim(1)?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(0)?
.sum_keepdim((1, 2))?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(1)?
.sum_keepdim((0, 2))?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
&[[
[398000, 398200, 398400, 398600],
[398800, 399000, 399200, 399400],
[399600, 399800, 400000, 400200],
[400400, 400600, 400800, 401000],
[401200, 401400, 401600, 401800]
]]
);
}
Ok(())
}
fn min(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.min_keepdim(2)?.to_vec3::<u32>()?,
&[[[1], [1]], [[1], [2]]]
);
assert_eq!(
tensor.min_keepdim(0)?.to_vec3::<u32>()?,
&[[[2, 1, 4], [1, 2, 8]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(
tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);
// Make the tensor non-contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(
tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.min_keepdim(0)?
.min_keepdim(2)?
.min_keepdim(1)?
.to_vec3::<u32>()?,
&[[[200]]]
);
assert_eq!(
tensor.min_keepdim(0)?.to_vec3::<u32>()?,
&[[
[200, 201, 202, 203],
[204, 205, 206, 207],
[208, 209, 210, 211],
[212, 213, 214, 215],
[216, 217, 218, 219]
]]
);
}
Ok(())
}
fn max(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.max_keepdim(2)?.to_vec3::<u32>()?,
&[[[4], [9]], [[7], [8]]]
);
assert_eq!(
tensor.max_keepdim(0)?.to_vec3::<u32>()?,
&[[[3, 1, 7], [8, 5, 9]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(
tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);
// Make the tensor non-contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(
tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.max_keepdim(0)?
.max_keepdim(2)?
.max_keepdim(1)?
.to_vec3::<u32>()?,
&[[[3999]]]
);
assert_eq!(
tensor.max_keepdim(0)?.to_vec3::<u32>()?,
&[[
[3980, 3981, 3982, 3983],
[3984, 3985, 3986, 3987],
[3988, 3989, 3990, 3991],
[3992, 3993, 3994, 3995],
[3996, 3997, 3998, 3999]
]]
);
}
Ok(())
}
fn argmin(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.argmin_keepdim(2)?.to_vec3::<u32>()?,
&[[[1], [0]], [[1], [1]]]
);
assert_eq!(
tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
&[[[1, 0, 0], [0, 1, 1]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmin_keepdim(1)?
.argmin_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);
// Make the tensor non-contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmin_keepdim(1)?
.argmin_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(2)?
.argmin_keepdim(1)?
.to_vec3::<u32>()?,
&[[[0]]]
);
assert_eq!(
tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
&[[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]]
);
}
Ok(())
}
fn argmax(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.argmax_keepdim(2)?.to_vec3::<u32>()?,
&[[[2], [2]], [[2], [0]]]
);
assert_eq!(
tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
&[[[0, 0, 1], [1, 0, 0]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmax_keepdim(1)?
.argmax_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);
// Make the tensor non-contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmax_keepdim(1)?
.argmax_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(2)?
.argmax_keepdim(1)?
.to_vec3::<u32>()?,
&[[[0]]]
);
assert_eq!(
tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
&[[
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
]]
);
}
Ok(())
}
fn narrow(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?,
&[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]],
);
assert_eq!(
tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?,
&[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]],
);
assert_eq!(
tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?,
&[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]],
);
assert_eq!(
tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?,
&[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]],
);
// The following has been checked against PyTorch via:
// import torch
// t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]])
// t.transpose(-1, -2).narrow(1, 1, 2)
assert_eq!(
tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?,
&[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]],
);
Ok(())
}
fn broadcast(device: &Device) -> Result<()> {
let data = &[3f32, 1., 4.];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?,
&[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]]
);
Ok(())
}
fn slice_set(device: &Device) -> Result<()> {
let (b, h, max_t, d) = (2, 4, 7, 3);
let cache = Tensor::zeros((b, h, max_t, d), DType::F32, device)?;
let tensor = Tensor::randn(0f32, 1f32, (b, h, 4, d), device)?;
cache.slice_set(&tensor, 2, 0)?;
let cache_t = cache.narrow(2, 0, 4)?;
let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
cache.slice_set(&tensor, 2, 1)?;
let cache_t = cache.narrow(2, 1, 4)?;
let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
let ones = Tensor::ones((b, h, 1, d), DType::F32, device)?;
cache.slice_set(&ones, 2, 6)?;
let diff = cache.narrow(2, 5, 1)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
let diff = (cache.narrow(2, 6, 1)? - 1.)?
.abs()?
.sum_all()?
.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
// This used to create a deadlock rather than returning an actual error.
assert!(cache.slice_set(&cache, 0, 0).is_err());
Ok(())
}
fn cat(device: &Device) -> Result<()> {
// 1D
let t1 = Tensor::new(&[3f32, 1., 4.], device)?;
let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?;
let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?;
assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],);
assert_eq!(
Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?,
[3f32, 1., 4., 1., 5., 9., 2.],
);
assert_eq!(
Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?,
[3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.],
);
// 2D
let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]];
let t1 = Tensor::new(data, device)?;
let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]];
let t2 = Tensor::new(data2, device)?;
assert_eq!(
Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0]
]
);
// PyTorch equivalent:
// import torch
// t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]])
// t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]])
// torch.cat([t1.t(), t2.t()], dim=1).t()
assert_eq!(
Tensor::cat(&[&t1.t()?, &t2.t()?], 1)?
.t()?
.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0]
]
);
assert_eq!(
Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0]
]
);
// 3D
let t1 = Tensor::arange(0, 48i64, device)?.reshape((2, 6, 4))?;
let t2 = Tensor::arange(100, 124i64, device)?.reshape((2, 3, 4))?;
let t3 = Tensor::arange(10000, 10032i64, device)?.reshape((2, 4, 4))?;
let t_cat = Tensor::cat(&[&t1, &t2, &t3], 1)?;
let t1 = t1.t()?.contiguous()?.t()?;
let t2 = t2.t()?.contiguous()?.t()?;
let t3 = t3.t()?.contiguous()?.t()?;
let t_cat2 = Tensor::cat(&[&t1, &t2, &t3], 1)?;
let diff = t_cat.eq(&t_cat2)?.to_dtype(DType::F32)?.sum_all()?;
assert_eq!(diff.to_vec0::<f32>()?, 104.0);
assert_eq!(t_cat.i((0, 0, 0))?.to_vec0::<i64>()?, 0);
assert_eq!(t_cat.i((0, 4, 0))?.to_vec0::<i64>()?, 16);
assert_eq!(t_cat.i((0, 5, 0))?.to_vec0::<i64>()?, 20);
assert_eq!(t_cat.i((1, 5, 0))?.to_vec0::<i64>()?, 44);
assert_eq!(t_cat.i((0, 6, 0))?.to_vec0::<i64>()?, 100);
assert_eq!(t_cat.i((1, 6, 0))?.to_vec0::<i64>()?, 112);
assert_eq!(t_cat.i((0, 6, 1))?.to_vec0::<i64>()?, 101);
assert_eq!(t_cat.i((0, 7, 1))?.to_vec0::<i64>()?, 105);
assert_eq!(t_cat.i((0, 12, 1))?.to_vec0::<i64>()?, 10013);
assert_eq!(t_cat.i((1, 12, 3))?.to_vec0::<i64>()?, 10031);
Ok(())
}
fn embeddings(device: &Device) -> Result<()> {
let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
let hs = t.embedding(&ids)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let hs = t.index_select(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let hs = t.index_select(&ids.to_dtype(DType::I64)?, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let ids = Tensor::new(&[u32::MAX, 2u32, u32::MAX], device)?;
let hs = t.index_select(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]]);
Ok(())
}
#[test]
fn index_select_fail() -> Result<()> {
// Check that an error is properly reported on out of bounds.
let ids = Tensor::new(&[4u32, 2u32, 1u32], &Device::Cpu)?;
let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], &Device::Cpu)?;
let hs = t.index_select(&ids, 0);
assert!(hs.is_err());
Ok(())
}
// The test below triggers an unwinding panic as there is a panic within the
// #[cfg(feature = "cuda")]
// #[test]
// #[should_panic]
// fn index_select_fail_gpu() {
// // Check that a panic happens for out of bounds in cuda
// if let Ok(device) = Device::new_cuda(0) {
// if let Ok(ids) = Tensor::new(&[4u32, 2u32, 1u32], &device) {
// if let Ok(t) = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], &device) {
// let _ = t.index_select(&ids, 0);
// }
// }
// }
// }
fn cmp(device: &Device) -> Result<()> {
let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?;
assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]);
assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]);
assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]);
assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]);
assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]);
assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]);
Ok(())
}
fn index_select(device: &Device) -> Result<()> {
let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
for dtype in [DType::U8, DType::U32, DType::I64] {
let ids = ids.to_dtype(dtype)?;
let hs = t.index_select(&ids, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 2.0, 1.0],
[3.0, 5.0, 4.0],
[6.0, 8.0, 7.0],
[9.0, 11.0, 10.0]
]
);
let hs = t.index_select(&ids, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]]
);
// Prior to https://github.com/huggingface/candle/pull/1022
// There would be a bug where the last values in the result tensor would be set to 0.
let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?;
let hs = t.index_select(&ids, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[6.0, 7.0, 8.0],
[3.0, 4.0, 5.0],
[0.0, 1.0, 2.0],
[6.0, 7.0, 8.0],
[3.0, 4.0, 5.0],
]
);
// Test selecting along a dim > 0 when the number of ids differs from the size
// of the target dim in the source/input tensor.
let ids = Tensor::new(&[1u32, 0u32, 1u32], device)?;
let t = Tensor::arange(1f32, 5f32, device)?.reshape((2, 2))?;
assert_eq!(t.to_vec2::<f32>()?, &[[1.0, 2.0], [3.0, 4.0]]);
let hs = t.index_select(&ids, 1)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[2.0, 1.0, 2.0], [4.0, 3.0, 4.0]]);
}
Ok(())
}
fn index_add(device: &Device) -> Result<()> {
let ids = Tensor::new(&[0u32, 1u32, 1u32], device)?;
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
let init = Tensor::ones((4, 2), DType::F32, device)?;
let hs = init.index_add(&ids, &t, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[[1.0, 4.0], [4.0, 10.0], [7.0, 16.0], [10.0, 22.0]],
);
let init = Tensor::zeros((4, 2), DType::F32, device)?;
let ids = Tensor::new(&[1u32, 0u32, 0u32], device)?;
let hs = init.index_add(&ids, &t, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[[3.0, 0.0], [9.0, 3.0], [15.0, 6.0], [21.0, 9.0]],
);
let init = Tensor::zeros((6, 3), DType::F32, device)?;
let ids = Tensor::new(&[5u32, 0u32, 1u32, 0u32], device)?;
let hs = init.index_add(&ids, &t, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[12.0, 14.0, 16.0],
[6.0, 7.0, 8.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 2.0]
]
);
Ok(())
}
fn slice_scatter(device: &Device) -> Result<()> {
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
let src = Tensor::arange(100f32, 106f32, device)?.reshape((2, 3))?;
assert_eq!(
t.slice_scatter0(&src, 0)?.to_vec2::<f32>()?,
&[
[100.0, 101.0, 102.0],
[103.0, 104.0, 105.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
assert_eq!(
t.slice_scatter0(&src, 1)?.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[100.0, 101.0, 102.0],
[103.0, 104.0, 105.0],
[9.0, 10.0, 11.0]
]
);
assert_eq!(
t.slice_scatter0(&src, 2)?.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[100.0, 101.0, 102.0],
[103.0, 104.0, 105.0],
]
);
Ok(())
}
fn scatter(device: &Device) -> Result<()> {
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?;
let init = Tensor::ones((4, 5), DType::F32, device)?;
let hs = init.scatter_add(&ids, &t, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[1.0, 2.0, 3.0, 1.0, 1.0],
[6.0, 1.0, 1.0, 4.0, 5.0],
[1.0, 9.0, 1.0, 14.0, 1.0],
[11.0, 1.0, 10.0, 1.0, 12.0]
]
);
let hs = init.scatter(&ids, &t, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0, 1.0, 1.0],
[5.0, 1.0, 1.0, 3.0, 4.0],
[1.0, 8.0, 1.0, 7.0, 1.0],
[10.0, 1.0, 9.0, 1.0, 11.0]
]
);
let init = Tensor::ones((6, 3), DType::F32, device)?;
let hs = init.scatter_add(&ids, &t, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[1.0, 11.0, 6.0],
[1.0, 2.0, 9.0],
[10.0, 1.0, 3.0],
[10.0, 8.0, 1.0],
[1.0, 5.0, 12.0],
[1.0, 1.0, 1.0]
]
);
let hs = init.scatter(&ids, &t, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 10.0, 5.0],
[1.0, 1.0, 8.0],
[9.0, 1.0, 2.0],
[6.0, 7.0, 1.0],
[1.0, 4.0, 11.0],
[1.0, 1.0, 1.0]
]
);
let hs = {
let ids = Tensor::new(
&[
[0u32, u32::MAX, 2],
[3, 4, u32::MAX],
[3, 3, 1],
[u32::MAX, u32::MAX, 4],
],
device,
)?;
init.scatter(&ids, &t, 0)?
};
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 1.0, 1.0],
[1.0, 1.0, 8.0],
[1.0, 1.0, 2.0],
[6.0, 7.0, 1.0],
[1.0, 4.0, 11.0],
[1.0, 1.0, 1.0]
]
);
init.scatter_set(&ids, &t, 0)?;
assert_eq!(
init.to_vec2::<f32>()?,
&[
[0.0, 10.0, 5.0],
[1.0, 1.0, 8.0],
[9.0, 1.0, 2.0],
[6.0, 7.0, 1.0],
[1.0, 4.0, 11.0],
[1.0, 1.0, 1.0]
]
);
Ok(())
}
fn gather(device: &Device) -> Result<()> {
let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?;
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
let hs = t.gather(&ids, 1)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0], [5.0], [7.0], [9.0]]);
let ids = Tensor::new(
&[[0u32, 0u32], [2u32, 0u32], [1u32, 1u32], [0u32, 2u32]],
device,
)?;
let hs = t.gather(&ids, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[[0.0, 0.0], [5.0, 3.0], [7.0, 7.0], [9.0, 11.0]]
);
let ids = Tensor::new(&[[0u32, 2u32, 0u32]], device)?;
let hs = t.gather(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0]]);
let ids = Tensor::new(&[[0u32, 2u32, 0u32], [0u32, 1u32, 1u32]], device)?;
let hs = t.gather(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0], [0.0, 4.0, 5.0]]);
let hs = {
let ids = Tensor::new(
&[
[0u32, 0u32],
[2u32, u32::MAX],
[u32::MAX, 1u32],
[0u32, 2u32],
],
device,
)?;
t.gather(&ids, 1)?
};
assert_eq!(
hs.to_vec2::<f32>()?,
&[[0.0, 0.0], [5.0, 0.0], [0.0, 7.0], [9.0, 11.0]]
);
// Random data
// Dim: 0
let t = Tensor::new(
&[
[
[108_f32, -47., 16., -56., -83., -130., 210.],
[253., 95., 151., 228., -210., -123., -127.],
[-9., -217., 2., -78., 163., 245., -204.],
[-246., 79., -238., 88., -226., -184., 171.],
[8., -48., -153., 234., -34., 166., -153.],
[124., 0., -10., -61., -242., -15., -238.],
],
[
[12., -64., -199., 244., -240., 156., -128.],
[173., -57., 4., -198., 233., -110., 238.],
[95., 82., 0., 240., 53., -211., 209.],
[-122., 167., -212., 227., -144., 61., 118.],
[-63., -146., 200., 244., 168., -167., 116.],
[-125., -147., 110., -253., -178., -250., -18.],
],
[
[57., 86., -50., 56., 92., 205., -78.],
[-137., -156., -18., 248., -61., -239., 14.],
[-248., -30., -50., -70., -251., 250., -83.],
[-221., 67., 72., 59., -24., -154., 232.],
[-144., -23., -74., 5., 93., 171., 205.],
[46., -77., -38., -226., 246., 161., -17.],
],
[
[-153., -231., -236., 161., 126., 2., -22.],
[-229., -41., 209., 164., 234., 160., 57.],
[223., 254., -186., -162., -46., -160., -102.],
[65., 30., 213., -253., 59., 224., -154.],
[-82., -203., -177., 17., 31., -256., -246.],
[176., -135., -65., 54., -56., 210., 76.],
],
[
[-10., -245., 168., 124., -14., -33., -178.],
[25., -43., -39., 132., -89., 169., 179.],
[187., -215., 32., -133., 87., -7., -168.],
[-224., -215., -5., -230., -58., -162., 128.],
[158., -137., -122., -100., -202., -83., 136.],
[30., -185., -144., 250., 209., -40., 127.],
],
[
[-196., 108., -245., 122., 146., -228., 62.],
[-1., -66., 160., 137., 13., -172., -21.],
[244., 199., -164., 28., 119., -175., 198.],
[-62., 253., -162., 195., -95., -230., -211.],
[123., -72., -26., -107., -139., 64., 245.],
[11., -126., -182., 108., -12., 184., -127.],
],
[
[-159., 126., 176., 161., 73., -111., -138.],
[-187., 214., -217., -33., -223., -201., -212.],
[-61., -120., -166., -172., -95., 53., 196.],
[-33., 86., 134., -152., 154., -53., 74.],
[186., -28., -154., -174., 141., -109., 217.],
[82., 35., 252., 145., 181., 74., -87.],
],
],
device,
)?;
let ids = Tensor::new(
&[
[
[6_u32, 6, 4, 3, 4, 4, 6],
[3, 3, 2, 4, 4, 4, 6],
[3, 3, 0, 2, 4, 6, 4],
[2, 5, 1, 2, 6, 6, 1],
[2, 1, 6, 5, 3, 2, 3],
[6, 1, 0, 1, 0, 2, 6],
],
[
[4, 6, 4, 3, 3, 3, 2],
[4, 3, 2, 4, 4, 4, 6],
[2, 3, 0, 2, 4, 6, 4],
[6, 5, 1, 2, 6, 6, 1],
[4, 1, 6, 5, 3, 2, 3],
[1, 1, 0, 1, 0, 2, 6],
],
[
[3, 6, 4, 3, 3, 3, 2],
[2, 3, 2, 4, 4, 4, 6],
[4, 3, 0, 2, 4, 6, 4],
[0, 5, 1, 2, 6, 6, 1],
[6, 1, 6, 5, 3, 2, 3],
[4, 1, 0, 1, 0, 2, 6],
],
[
[0, 6, 4, 3, 3, 3, 2],
[5, 3, 2, 4, 4, 4, 6],
[0, 3, 0, 2, 4, 6, 4],
[3, 5, 1, 2, 6, 6, 1],
[0, 1, 6, 5, 3, 2, 3],
[3, 1, 0, 1, 0, 2, 6],
],
],
device,
)?;
let hs = t.gather(&ids, 0)?;
assert_eq!(
hs.to_vec3::<f32>()?,
&[
[
[-159_f32, 126., 168., 161., -14., -33., -138.],
[-229., -41., -18., 132., -89., 169., -212.],
[223., 254., 2., -70., 87., 53., -168.],
[-221., 253., -212., 59., 154., -53., 118.],
[-144., -146., -154., -107., 31., 171., -246.],
[82., -147., -10., -253., -242., 161., -87.]
],
[
[-10., 126., 168., 161., 126., 2., -78.],
[25., -41., -18., 132., -89., 169., -212.],
[-248., 254., 2., -70., 87., 53., -168.],
[-33., 253., -212., 59., 154., -53., 118.],
[158., -146., -154., -107., 31., 171., -246.],
[-125., -147., -10., -253., -242., 161., -87.]
],
[
[-153., 126., 168., 161., 126., 2., -78.],
[-137., -41., -18., 132., -89., 169., -212.],
[187., 254., 2., -70., 87., 53., -168.],
[-246., 253., -212., 59., 154., -53., 118.],
[186., -146., -154., -107., 31., 171., -246.],
[30., -147., -10., -253., -242., 161., -87.]
],
[
[108., 126., 168., 161., 126., 2., -78.],
[-1., -41., -18., 132., -89., 169., -212.],
[-9., 254., 2., -70., 87., 53., -168.],
[65., 253., -212., 59., 154., -53., 118.],
[8., -146., -154., -107., 31., 171., -246.],
[176., -147., -10., -253., -242., 161., -87.]
]
]
);
// Dim: 1
let t = Tensor::new(
&[
[
[-117_f32, -175., 69., -163.],
[200., 242., -21., -67.],
[179., 150., -126., -75.],
[-118., 38., -138., -13.],
[-221., 136., -185., 180.],
[58., 182., -204., -149.],
],
[
[3., -148., -58., -154.],
[-43., 45., -108., 4.],
[-69., -249., -71., -21.],
[80., 110., -152., -235.],
[-88., 7., 92., -250.],
[-186., 207., -242., 98.],
],
[
[238., 19., 64., -242.],
[-150., -97., 218., 58.],
[111., -233., 204., -212.],
[-242., -232., 83., 42.],
[153., 62., -251., 219.],
[-117., 36., -119., 10.],
],
[
[215., 159., -169., -27.],
[-83., 101., -88., 169.],
[-205., 93., 225., -64.],
[-162., 240., 214., 23.],
[-112., 6., 21., 245.],
[-38., 113., 93., 215.],
],
[
[91., -188., -148., 101.],
[74., 203., -35., 55.],
[-116., -130., -153., -96.],
[58., 22., -45., -194.],
[-221., -134., 73., 159.],
[-203., -254., 31., 235.],
],
[
[105., -53., 61., 186.],
[-195., 234., 75., -1.],
[51., 139., 160., -108.],
[-173., -167., 161., 19.],
[83., -246., 156., -222.],
[109., 39., -149., 137.],
],
],
device,
)?;
let ids = Tensor::new(
&[
[[4_u32, 4, 4, 2]],
[[0, 4, 4, 3]],
[[1, 5, 3, 4]],
[[0, 3, 3, 2]],
[[1, 1, 5, 2]],
[[1, 4, 5, 4]],
],
device,
)?;
let hs = t.gather(&ids, 1)?;
assert_eq!(
hs.to_vec3::<f32>()?,
&[
[[-221., 136., -185., -75.]],
[[3., 7., 92., -235.]],
[[-150., 36., 83., 219.]],
[[215., 240., 214., -64.]],
[[74., 203., 31., -96.]],
[[-195., -246., -149., -222.]]
]
);
// Dim: 2
let t = Tensor::new(
&[
[[-162_f32, 202.], [-126., -39.], [35., -65.], [1., 80.]],
[[37., 248.], [-191., 89.], [117., -40.], [-217., 220.]],
],
device,
)?;
let ids = Tensor::new(&[[[1_u32], [0], [1], [1]], [[0], [1], [0], [1]]], device)?;
let hs = t.gather(&ids, 2)?;
assert_eq!(
hs.to_vec3::<f32>()?,
&[
[[202.], [-126.], [-65.], [80.]],
[[37.], [89.], [117.], [220.]]
]
);
let t = Tensor::new(
&[
[[-21_f32, -197.], [194., 122.]],
[[255., -106.], [-191., 250.]],
[[33., -117.], [43., 10.]],
[[-130., 238.], [-217., -92.]],
],
device,
)?;
let ids = Tensor::new(
&[
[[0_u32, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [0, 1]],
[[1, 0], [1, 0]],
],
device,
)?;
let hs = t.gather(&ids, 2)?;
assert_eq!(
hs.to_vec3::<f32>()?,
&[
[[-21., -197.], [122., 194.]],
[[-106., 255.], [-191., 250.]],
[[33., -117.], [43., 10.]],
[[238., -130.], [-92., -217.]]
]
);
Ok(())
}
fn broadcasting(device: &Device) -> Result<()> {
let t1 = Tensor::arange(0f32, 24f32, device)?.reshape((4, 2, 3))?;
let t2 = Tensor::new(&[100f32, 200f32], device)?;
let s = t1.broadcast_add(&t2.reshape((2, 1))?)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[100.0, 101.0, 102.0], [203.0, 204.0, 205.0]],
[[106.0, 107.0, 108.0], [209.0, 210.0, 211.0]],
[[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]],
[[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]]
]
);
let s = t1.t()?.broadcast_add(&t2)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[100.0, 203.0], [101.0, 204.0], [102.0, 205.0]],
[[106.0, 209.0], [107.0, 210.0], [108.0, 211.0]],
[[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]],
[[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]]
]
);
let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[-100.0, -99.0, -98.0], [-197.0, -196.0, -195.0]],
[[-94.0, -93.0, -92.0], [-191.0, -190.0, -189.0]],
[[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]],
[[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]]
]
);
let s = t1.t()?.broadcast_sub(&t2)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[-100.0, -197.0], [-99.0, -196.0], [-98.0, -195.0]],
[[-94.0, -191.0], [-93.0, -190.0], [-92.0, -189.0]],
[[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]],
[[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]]
]
);
// Test a narrowed version as this uses a layout start_offset.
let t1 = t1.i(2..)?;
let s = t1.broadcast_add(&t2.reshape((2, 1))?)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]],
[[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]]
]
);
let s = t1.t()?.broadcast_add(&t2)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]],
[[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]]
]
);
let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]],
[[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]]
]
);
let s = t1.t()?.broadcast_sub(&t2)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]],
[[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]]
]
);
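// t3 holds the reciprocal of t2, so dividing by t3 should match multiplying by t2.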
let t3 = Tensor::new(1f32, device)?.broadcast_div(&t2)?;
let s = t1.broadcast_mul(&t2.reshape((2, 1))?)?;
let s_div = t1.broadcast_div(&t3.reshape((2, 1))?)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[1200.0, 1300.0, 1400.0], [3000.0, 3200.0, 3400.0]],
[[1800.0, 1900.0, 2000.0], [4200.0, 4400.0, 4600.0]]
]
);
assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,);
let s = t1.t()?.broadcast_mul(&t2)?;
let s_div = t1.t()?.broadcast_div(&t3)?;
assert_eq!(
s.to_vec3::<f32>()?,
&[
[[1200.0, 3000.0], [1300.0, 3200.0], [1400.0, 3400.0]],
[[1800.0, 4200.0], [1900.0, 4400.0], [2000.0, 4600.0]]
]
);
assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,);
Ok(())
}
fn randn(device: &Device) -> Result<()> {
let tensor = Tensor::randn(0f32, 1f32, (5, 3), device)?;
assert_eq!(tensor.dims(), [5, 3]);
// Check that the seed gets updated by checking that
// a new series of numbers is generated each time
let tensor2 = Tensor::randn(0f32, 1f32, (5, 3), device)?;
assert_ne!(tensor.to_vec2::<f32>()?, tensor2.to_vec2::<f32>()?);
let tensor = Tensor::rand(0f32, 1f32, (5, 3), device)?;
assert_eq!(tensor.dims(), [5, 3]);
// Check that the seed gets updated by checking that
// a new series of numbers is generated each time
let tensor2 = Tensor::rand(0f32, 1f32, (5, 3), device)?;
assert_ne!(tensor.to_vec2::<f32>()?, tensor2.to_vec2::<f32>()?);
// We do not expect deterministic elements at any index.
// There once was a bug that had a deterministic zero element in evenly sized tensors.
const N: usize = 2;
let v = (0..100)
.map(|_| Tensor::randn(0f32, 1f32, N, device).and_then(|t| t.to_vec1::<f32>()))
.collect::<Result<Vec<_>>>()?;
assert!(
(0..N).all(|i| v.windows(2).any(|pair| pair[0][i] != pair[1][i])),
"There are deterministic values in the randn tensors"
);
let v = (0..100)
.map(|_| Tensor::rand(0f32, 1f32, N, device).and_then(|t| t.to_vec1::<f32>()))
.collect::<Result<Vec<_>>>()?;
assert!(
(0..N).all(|i| v.windows(2).any(|pair| pair[0][i] != pair[1][i])),
"There are deterministic values in the rand tensors"
);
Ok(())
}
fn zero_dim(device: &Device) -> Result<()> {
let t = Tensor::zeros((4, 0, 1), DType::F32, device)?;
assert_eq!(t.dims3()?, (4, 0, 1));
let t2 = Tensor::zeros((4, 3, 1), DType::F32, device)?;
let t_cat = Tensor::cat(&[&t, &t2], 1)?;
assert_eq!(t_cat.dims3()?, (4, 3, 1));
let t_cat = Tensor::cat(&[&t, &t], 1)?;
assert_eq!(t_cat.dims3()?, (4, 0, 1));
let t_unary = t.sqrt()?;
assert_eq!(t_unary.dims3()?, (4, 0, 1));
let t_plus = (&t + 1.)?;
assert_eq!(t_plus.dims3()?, (4, 0, 1));
let t_mm = t2.matmul(&t.t()?)?;
assert_eq!(t_mm.dims3()?, (4, 3, 0));
let t_mm = t.matmul(&t2.t()?)?;
assert_eq!(t_mm.dims3()?, (4, 0, 3));
let t_mm = t.t()?.matmul(&t)?;
assert_eq!(t_mm.dims3()?, (4, 1, 1));
Ok(())
}
test_device!(zeros, zeros_cpu, zeros_gpu, zeros_metal);
test_device!(ones, ones_cpu, ones_gpu, ones_metal);
test_device!(full, full_cpu, full_gpu, full_metal);
test_device!(const_set, cs_cpu, cs_gpu, cs_metal);
test_device!(arange, arange_cpu, arange_gpu, arange_metal);
test_device!(add_mul, add_mul_cpu, add_mul_gpu, add_mul_metal);
test_device!(tensor_2d, tensor_2d_cpu, tensor_2d_gpu, tensor_2d_metal);
test_device!(narrow, narrow_cpu, narrow_gpu, narrow_metal);
test_device!(broadcast, broadcast_cpu, broadcast_gpu, broadcast_metal);
test_device!(slice_set, ss_cpu, ss_gpu, ss_metal);
test_device!(cat, cat_cpu, cat_gpu, cat_metal);
test_device!(sum, sum_cpu, sum_gpu, sum_metal);
test_device!(min, min_cpu, min_gpu, min_metal);
test_device!(max, max_cpu, max_gpu, max_metal);
test_device!(argmax, argmax_cpu, argmax_gpu, argmax_metal);
test_device!(argmin, argmin_cpu, argmin_gpu, argmin_metal);
test_device!(transpose, transpose_cpu, transpose_gpu, transpose_metal);
test_device!(unary_op, unary_op_cpu, unary_op_gpu, unary_op_metal);
test_device!(binary_op, binary_op_cpu, binary_op_gpu, binary_op_metal);
test_device!(embeddings, embeddings_cpu, embeddings_gpu, embeddings_metal);
test_device!(cmp, cmp_cpu, cmp_gpu, cmp_metal);
test_device!(
broadcasting,
broadcasting_cpu,
broadcasting_gpu,
broadcasting_metal
);
test_device!(
index_select,
index_select_cpu,
index_select_gpu,
index_select_metal
);
test_device!(index_add, index_add_cpu, index_add_gpu, index_add_metal);
test_device!(gather, gather_cpu, gather_gpu, gather_metal);
test_device!(scatter, scatter_cpu, scatter_gpu, scatter_metal);
test_device!(
slice_scatter,
slice_scatter_cpu,
slice_scatter_gpu,
slice_scatter_metal
);
test_device!(randn, randn_cpu, randn_gpu, randn_metal);
test_device!(clamp, clamp_cpu, clamp_gpu, clamp_metal);
test_device!(asort, asort_cpu, asort_gpu, asort_metal);
test_device!(var, var_cpu, var_gpu, var_metal);
test_device!(zero_dim, zero_dim_cpu, zero_dim_gpu, zero_dim_metal);
// There was originally a bug on the CPU implementation for randn
// https://github.com/huggingface/candle/issues/381
#[test]
fn randn_hasneg() -> Result<()> {
let t = Tensor::randn(0f32, 1f32, 200, &Device::Cpu)?.to_vec1::<f32>()?;
if t.iter().all(|&v| v >= 0.) {
candle_core::bail!("all values in the tensor are non-negative")
}
Ok(())
}
#[test]
fn pad_with_same() -> Result<()> {
let t = Tensor::arange(1f32, 5f32, &Device::Cpu)?.reshape((2, 2))?;
let t0 = t.pad_with_same(0, 1, 2)?;
assert_eq!(
t0.to_vec2::<f32>()?,
[[1.0, 2.0], [1.0, 2.0], [3.0, 4.0], [3.0, 4.0], [3.0, 4.0]]
);
let t1 = t.pad_with_same(1, 1, 2)?;
assert_eq!(
t1.to_vec2::<f32>()?,
[[1.0, 1.0, 2.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 4.0]]
);
Ok(())
}
#[test]
fn i64_abs() -> Result<()> {
let t = Tensor::new(&[-42i64, 1337], &Device::Cpu)?;
let t = t.abs()?;
assert_eq!(t.to_vec1::<i64>()?, [42, 1337]);
Ok(())
}
#[test]
fn tril_triu_eye() -> Result<()> {
let t = Tensor::tril2(4, DType::F32, &Device::Cpu)?;
assert_eq!(
t.to_vec2::<f32>()?,
[
[1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 1.0, 1.0]
],
);
let t = Tensor::triu2(4, DType::F32, &Device::Cpu)?;
assert_eq!(
t.to_vec2::<f32>()?,
[
[1.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0]
]
);
let t = Tensor::eye(4, DType::F32, &Device::Cpu)?;
assert_eq!(
t.to_vec2::<f32>()?,
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]
]
);
Ok(())
}
#[test]
fn cumsum() -> Result<()> {
let t = &[3f32, 1., 4., 1., 5.];
let t = Tensor::new(t, &Device::Cpu)?;
assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [3., 4., 8., 9., 14.]);
let t = t.unsqueeze(1)?;
assert_eq!(
t.cumsum(0)?.to_vec2::<f32>()?,
[[3.0], [4.0], [8.0], [9.0], [14.0]]
);
assert_eq!(
t.cumsum(1)?.to_vec2::<f32>()?,
[[3.0], [1.0], [4.0], [1.0], [5.0]]
);
let t = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let t = Tensor::new(t, &Device::Cpu)?;
assert_eq!(
t.cumsum(1)?.to_vec2::<f32>()?,
[[3.0, 4.0, 8.0, 9.0, 14.0], [2.0, 3.0, 10.0, 18.0, 20.0]],
);
assert_eq!(
t.cumsum(0)?.to_vec2::<f32>()?,
[[3.0, 1.0, 4.0, 1.0, 5.0], [5.0, 2.0, 11.0, 9.0, 7.0]]
);
Ok(())
}
/// A helper function for floating point comparison. Both `a` and `b` must be 1D tensors
/// and contain the same number of elements. The assertion passes if the absolute
/// difference of every pair of corresponding elements is smaller than `epsilon`.
fn assert_close(a: &Tensor, b: &Tensor, epsilon: f64) -> Result<()> {
let a_vec: Vec<f64> = a.to_vec1()?;
let b_vec: Vec<f64> = b.to_vec1()?;
assert_eq!(a_vec.len(), b_vec.len());
for (a, b) in a_vec.iter().zip(b_vec.iter()) {
assert!((a - b).abs() < epsilon);
}
Ok(())
}
#[test]
fn log_sum_exp() -> Result<()> {
let input = Tensor::new(
&[
[[1f64, 2., 3.], [4., 5., 6.]],
[[-1000.0, -999.0, -1001.0], [1000.0, 999.0, 1001.0]],
],
&Device::Cpu,
)?;
let output = input.log_sum_exp(D::Minus1)?;
// The expected values were obtained from PyTorch.
let expected = Tensor::new(&[[3.4076, 6.4076], [-998.5924, 1001.4076]], &Device::Cpu)?;
assert_eq!(output.dims(), expected.dims());
assert_close(&output.flatten_all()?, &expected.flatten_all()?, 0.00001)?;
assert_eq!(
input.log_sum_exp((0, 1))?.to_vec1::<f64>()?,
[1000.0, 999.0, 1001.0]
);
assert_eq!(
input.log_sum_exp(())?.to_vec3::<f64>()?,
input.to_vec3::<f64>()?
);
Ok(())
}
#[test]
fn pow() -> Result<()> {
let lhs = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &Device::Cpu)?;
let rhs = (&lhs - 2.)?;
let res = lhs.pow(&rhs)?;
assert_eq!(
test_utils::to_vec2_round(&res, 3)?,
[[1.0, 1.0, 3.0], [16.0, 125.0, 1296.0]]
);
Ok(())
}
#[test]
fn test_flip_1d() -> Result<()> {
// 1D: [0, 1, 2, 3, 4]
let t = Tensor::arange(0.0, 5.0, &Device::Cpu)?.reshape((5,))?;
let flipped = t.flip(&[0])?;
// Expected: [4, 3, 2, 1, 0]
let expected = Tensor::from_vec(vec![4.0, 3.0, 2.0, 1.0, 0.0], (5,), &Device::Cpu)?;
candle_core::test_utils::assert_tensor_eq(&flipped, &expected)?;
Ok(())
}
#[test]
fn test_flip_2d() -> Result<()> {
// 2D:
// [[0, 1, 2],
// [3, 4, 5]]
let t = Tensor::arange(0.0, 6.0, &Device::Cpu)?.reshape((2, 3))?;
let flipped = t.flip(&[0, 1])?;
// Expected:
// [[5, 4, 3],
// [2, 1, 0]]
let expected = Tensor::from_vec(vec![5.0, 4.0, 3.0, 2.0, 1.0, 0.0], (2, 3), &Device::Cpu)?;
candle_core::test_utils::assert_tensor_eq(&flipped, &expected)?;
Ok(())
}
#[test]
fn test_flip_3d_channels() -> Result<()> {
// 3D:
// [[[0,1,2],
// [3,4,5]],
//
// [[6,7,8],
// [9,10,11]]]
let t = Tensor::arange(0.0, 12.0, &Device::Cpu)?.reshape((2, 2, 3))?;
let flipped = t.flip(&[2])?;
// Expected:
// [[[2,1,0],
// [5,4,3]],
//
// [[8,7,6],
// [11,10,9]]]
let expected = Tensor::from_vec(
vec![2.0, 1.0, 0.0, 5.0, 4.0, 3.0, 8.0, 7.0, 6.0, 11.0, 10.0, 9.0],
(2, 2, 3),
&Device::Cpu,
)?;
candle_core::test_utils::assert_tensor_eq(&flipped, &expected)?;
Ok(())
}
#[test]
fn tensor_new() -> Result<()> {
let t1 = Tensor::new(vec![1f32, 2.0, 3.0], &Device::Cpu)?;
assert_eq!(t1.to_vec1::<f32>()?, [1.0, 2.0, 3.0]);
let t2 = Tensor::new(vec![vec![1f32, 2., 3.], vec![4., 5., 6.]], &Device::Cpu)?;
assert_eq!(t2.to_vec2::<f32>()?, [[1., 2., 3.], [4., 5., 6.]]);
let t3 = Tensor::new(
vec![
vec![vec![1f32, 2., 3.], vec![4., 5., 6.]],
vec![vec![3f32, 1., 4.], vec![1., 5., 9.]],
],
&Device::Cpu,
)?;
assert_eq!(
t3.to_vec3::<f32>()?,
[
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]
]
);
Ok(())
}
#[test]
fn tensor_norm() -> Result<()> {
let t = Tensor::new(&[[3., 4.], [0., 0.]], &Device::Cpu)?;
let norm = t.norm()?;
assert_eq!(norm.to_scalar::<f64>()?, 5.);
Ok(())
}
| candle/candle-core/tests/tensor_tests.rs/0 | {
"file_path": "candle/candle-core/tests/tensor_tests.rs",
"repo_id": "candle",
"token_count": 37348
} | 33 |
[package]
name = "candle-examples"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
accelerate-src = { workspace = true, optional = true }
candle = { workspace = true }
candle-datasets = { workspace = true, optional = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
candle-flash-attn = { workspace = true, optional = true }
candle-onnx = { workspace = true, optional = true }
csv = "1.3.0"
cudarc = { workspace = true, optional = true }
half = { workspace = true, optional = true }
hf-hub = { workspace = true, features = ["tokio"] }
image = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
palette = { version = "0.7.6", optional = true }
enterpolation = { version = "0.2.1", optional = true }
pyo3 = { version = "0.22.0", features = [
"auto-initialize",
"abi3-py311",
], optional = true }
rayon = { workspace = true }
rubato = { version = "0.15.0", optional = true }
safetensors = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
symphonia = { version = "0.5.3", features = ["all"], optional = true }
tokenizers = { workspace = true, features = ["onig"] }
cpal = { version = "0.15.2", optional = true }
pdf2image = { version = "0.1.2", optional = true }
tekken-rs = { version = "0.1.1", optional = true }
[dev-dependencies]
anyhow = { workspace = true }
byteorder = { workspace = true }
clap = { workspace = true }
imageproc = { workspace = true }
memmap2 = { workspace = true }
rand = { workspace = true }
ab_glyph = { workspace = true }
tracing = { workspace = true }
tracing-chrome = { workspace = true }
tracing-subscriber = { workspace = true }
# Pinned explicitly to disambiguate from the tokio version (1.28.1) used in the wasm examples.
tokio = "1.43.0"
[build-dependencies]
anyhow = { workspace = true }
bindgen_cuda = { version = "0.1.1", optional = true }
[features]
default = []
accelerate = [
"dep:accelerate-src",
"candle/accelerate",
"candle-nn/accelerate",
"candle-transformers/accelerate",
]
cuda = [
"candle/cuda",
"candle-nn/cuda",
"candle-transformers/cuda",
"dep:bindgen_cuda",
]
cudnn = ["candle/cudnn", "candle-nn/cudnn", "candle-transformers/cudnn"]
flash-attn = ["cuda", "candle-transformers/flash-attn", "dep:candle-flash-attn"]
mkl = [
"dep:intel-mkl-src",
"candle/mkl",
"candle-nn/mkl",
"candle-transformers/mkl",
]
nccl = ["cuda", "cudarc/nccl", "dep:half"]
onnx = ["candle-onnx"]
metal = ["candle/metal", "candle-nn/metal"]
microphone = ["cpal", "rubato"]
encodec = ["cpal", "symphonia", "rubato"]
mimi = ["cpal", "symphonia", "rubato"]
snac = ["cpal", "symphonia", "rubato"]
depth_anything_v2 = ["palette", "enterpolation"]
tekken = ["tekken-rs"]
[[example]]
name = "llama_multiprocess"
required-features = ["cuda", "nccl", "flash-attn"]
[[example]]
name = "reinforcement-learning"
required-features = ["pyo3"]
[[example]]
name = "onnx"
required-features = ["onnx"]
[[example]]
name = "onnx-llm"
required-features = ["onnx"]
[[example]]
name = "onnx_basics"
required-features = ["onnx"]
[[example]]
name = "whisper"
required-features = ["symphonia"]
[[example]]
name = "whisper-microphone"
required-features = ["microphone"]
[[example]]
name = "mnist-training"
required-features = ["candle-datasets"]
[[example]]
name = "llama2-c"
required-features = ["candle-datasets"]
[[example]]
name = "mimi"
required-features = ["mimi"]
[[example]]
name = "snac"
required-features = ["snac"]
[[example]]
name = "encodec"
required-features = ["encodec"]
[[example]]
name = "depth_anything_v2"
required-features = ["depth_anything_v2"]
[[example]]
name = "silero-vad"
required-features = ["onnx"]
[[example]]
name = "colpali"
required-features = ["pdf2image"]
[[example]]
name = "voxtral"
required-features = ["symphonia"]
| candle/candle-examples/Cargo.toml/0 | {
"file_path": "candle/candle-examples/Cargo.toml",
"repo_id": "candle",
"token_count": 1497
} | 34 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{DType, Device, Tensor};
use candle_nn as nn;
use candle_transformers::models::chinese_clip::{ChineseClipConfig, ChineseClipModel};
use clap::Parser;
use tokenizers::Tokenizer;
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
tokenizer: Option<String>,
#[arg(long, use_value_delimiter = true)]
images: Option<Vec<String>>,
#[arg(long)]
cpu: bool,
#[arg(long, use_value_delimiter = true)]
sequences: Option<Vec<String>>,
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
tracing_subscriber::fmt::init();
let device = candle_examples::device(args.cpu)?;
let var = load_weights(args.model, &device)?;
let clip_model = ChineseClipModel::new(var, &ChineseClipConfig::clip_vit_base_patch16())?;
tracing::info!("Transformer loaded. ");
let (pixel_values, vec_imgs) = load_images(args.images, &device)?;
tracing::info!("Images loaded. ");
let tokenizer = load_tokenizer()?;
let (input_ids, type_ids, attention_mask, text_sequences) =
tokenize_sequences(args.sequences, &tokenizer, &device)?;
tracing::info!("Computing ... ");
let (_logits_per_text, logits_per_image) = clip_model.forward(
&pixel_values,
&input_ids,
Some(&type_ids),
Some(&attention_mask),
)?;
let softmax_image = nn::ops::softmax(&logits_per_image, 1)?;
let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?;
let probability_vec = softmax_image_vec
.iter()
.map(|v| v * 100.0)
.collect::<Vec<f32>>();
let probability_per_image = probability_vec.len() / vec_imgs.len();
for (i, img) in vec_imgs.iter().enumerate() {
let start = i * probability_per_image;
let end = start + probability_per_image;
let prob = &probability_vec[start..end];
tracing::info!("\n\nResults for image: {}\n", img);
for (i, p) in prob.iter().enumerate() {
tracing::info!("Probability: {:.4}% Text: {} ", p, text_sequences[i]);
}
}
Ok(())
}
pub fn load_weights(model: Option<String>, device: &Device) -> anyhow::Result<nn::VarBuilder<'_>> {
let model_file = match model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let repo = hf_hub::Repo::with_revision(
"OFA-Sys/chinese-clip-vit-base-patch16".to_string(),
hf_hub::RepoType::Model,
"refs/pr/3".to_string(),
);
let api = api.repo(repo);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
Ok(unsafe { nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, device)? })
}
pub fn load_tokenizer() -> anyhow::Result<Tokenizer> {
let tokenizer_file = {
let api = hf_hub::api::sync::Api::new()?;
let repo = hf_hub::Repo::with_revision(
"OFA-Sys/chinese-clip-vit-base-patch16".to_string(),
hf_hub::RepoType::Model,
"refs/pr/3".to_string(),
);
let api = api.repo(repo);
api.get("tokenizer.json")?
};
Tokenizer::from_file(tokenizer_file).map_err(anyhow::Error::msg)
}
pub fn tokenize_sequences(
sequences: Option<Vec<String>>,
tokenizer: &Tokenizer,
device: &Device,
) -> anyhow::Result<(Tensor, Tensor, Tensor, Vec<String>)> {
let vec_seq = match sequences {
Some(seq) => seq,
None => vec![
"自行车比赛".to_string(),
"两只猫咪".to_string(),
"拿着蜡烛的机器人".to_string(),
],
};
let mut input_ids = vec![];
let mut type_ids = vec![];
let mut attention_mask = vec![];
let mut max_len = 0;
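// Tokenize each sequence while tracking the longest encoding so that all inputs
// can be padded to a common length below.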
for seq in vec_seq.clone() {
let encoding = tokenizer.encode(seq, true).map_err(anyhow::Error::msg)?;
input_ids.push(encoding.get_ids().to_vec());
type_ids.push(encoding.get_type_ids().to_vec());
attention_mask.push(encoding.get_attention_mask().to_vec());
if encoding.get_ids().len() > max_len {
max_len = encoding.get_ids().len();
}
}
let pad_id = *tokenizer
.get_vocab(true)
.get("[PAD]")
.ok_or(anyhow::Error::msg("No pad token"))?;
let input_ids: Vec<Vec<u32>> = input_ids
.iter_mut()
.map(|item| {
item.extend(vec![pad_id; max_len - item.len()]);
item.to_vec()
})
.collect();
let type_ids: Vec<Vec<u32>> = type_ids
.iter_mut()
.map(|item| {
item.extend(vec![0; max_len - item.len()]);
item.to_vec()
})
.collect();
let attention_mask: Vec<Vec<u32>> = attention_mask
.iter_mut()
.map(|item| {
item.extend(vec![0; max_len - item.len()]);
item.to_vec()
})
.collect();
let input_ids = Tensor::new(input_ids, device)?;
let type_ids = Tensor::new(type_ids, device)?;
let attention_mask = Tensor::new(attention_mask, device)?;
Ok((input_ids, type_ids, attention_mask, vec_seq))
}
pub fn load_images(
images: Option<Vec<String>>,
device: &Device,
) -> anyhow::Result<(Tensor, Vec<String>)> {
let vec_imgs = match images {
Some(imgs) => imgs,
None => vec![
"candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(),
"candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(),
],
};
let mut images = vec![];
for path in vec_imgs.iter() {
let tensor = load_image(path, 224, device)?;
images.push(tensor);
}
let images = Tensor::stack(&images, 0)?.to_device(device)?;
Ok((images, vec_imgs))
}
fn load_image<T: AsRef<std::path::Path>>(
path: T,
image_size: usize,
device: &Device,
) -> anyhow::Result<Tensor> {
let img = image::ImageReader::open(path)?.decode()?;
let (height, width) = (image_size, image_size);
let img = img.resize_to_fill(
width as u32,
height as u32,
image::imageops::FilterType::Triangle,
);
let img = img.to_rgb8().into_raw();
let img = Tensor::from_vec(img, (height, width, 3), device)?.permute((2, 0, 1))?;
let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?;
let std =
Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?;
let img = (img.to_dtype(DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)?;
Ok(img)
}
| candle/candle-examples/examples/chinese_clip/main.rs/0 | {
"file_path": "candle/candle-examples/examples/chinese_clip/main.rs",
"repo_id": "candle",
"token_count": 3197
} | 35 |
#include <stdint.h>
#include "reduction_utils.cuh"
template <typename scalar_t>
__device__ void
rms_norm_kernel(scalar_t *__restrict__ out, // [num_tokens, hidden_size]
const scalar_t *__restrict__ input, // [num_tokens, hidden_size]
const float epsilon, const uint32_t num_tokens,
const uint32_t hidden_size) {
__shared__ float s_variance;
float variance = 0.0f;
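// First pass: each thread accumulates a partial sum of squares, which is then
// combined across the block.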
for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) {
const float x = (float)input[blockIdx.x * hidden_size + idx];
variance += x * x;
}
variance = blockReduceSum<float>(variance);
if (threadIdx.x == 0) {
s_variance = rsqrtf(variance / hidden_size + epsilon);
}
__syncthreads();
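// Second pass: scale each element by the inverse RMS computed above.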
for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) {
float x = (float)input[blockIdx.x * hidden_size + idx];
out[blockIdx.x * hidden_size + idx] = ((scalar_t)(x * s_variance));
}
}
extern "C" __global__ void rms_f32(
float *__restrict__ out, // [num_tokens, hidden_size]
const float *__restrict__ input, // [num_tokens, hidden_size]
const float epsilon, const uint32_t num_tokens,
const uint32_t hidden_size) {
rms_norm_kernel(out, input, epsilon, num_tokens, hidden_size);
}
| candle/candle-examples/examples/custom-ops/kernels/layernorm_kernels.cu/0 | {
"file_path": "candle/candle-examples/examples/custom-ops/kernels/layernorm_kernels.cu",
"repo_id": "candle",
"token_count": 561
} | 36 |
# candle-efficientnet
Demonstrates a Candle implementation of EfficientNet for image classification over the ImageNet classes.
## Running an example
```bash
$ cargo run --example efficientnet --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which b1
> bicycle-built-for-two, tandem bicycle, tandem: 45.85%
> mountain bike, all-terrain bike, off-roader: 30.45%
> crash helmet : 2.58%
> unicycle, monocycle : 2.21%
> tricycle, trike, velocipede: 1.53%
```
| candle/candle-examples/examples/efficientnet/README.md/0 | {
"file_path": "candle/candle-examples/examples/efficientnet/README.md",
"repo_id": "candle",
"token_count": 171
} | 37 |
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use candle_transformers::models::{clip, flux, t5};
use anyhow::{Error as E, Result};
use candle::{IndexOp, Module, Tensor};
use candle_nn::VarBuilder;
use clap::Parser;
use tokenizers::Tokenizer;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The prompt to be used for image generation.
#[arg(long, default_value = "A rusty robot walking on a beach")]
prompt: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Use the quantized model.
#[arg(long)]
quantized: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The height in pixels of the generated image.
#[arg(long)]
height: Option<usize>,
/// The width in pixels of the generated image.
#[arg(long)]
width: Option<usize>,
#[arg(long)]
decode_only: Option<String>,
#[arg(long, value_enum, default_value = "schnell")]
model: Model,
/// Use the slower kernels.
#[arg(long)]
use_dmmv: bool,
/// The seed to use when generating random samples.
#[arg(long)]
seed: Option<u64>,
}
#[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Eq)]
enum Model {
Schnell,
Dev,
}
fn run(args: Args) -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let Args {
prompt,
cpu,
height,
width,
tracing,
decode_only,
model,
quantized,
..
} = args;
let width = width.unwrap_or(1360);
let height = height.unwrap_or(768);
let _guard = if tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let api = hf_hub::api::sync::Api::new()?;
let bf_repo = {
let name = match model {
Model::Dev => "black-forest-labs/FLUX.1-dev",
Model::Schnell => "black-forest-labs/FLUX.1-schnell",
};
api.repo(hf_hub::Repo::model(name.to_string()))
};
let device = candle_examples::device(cpu)?;
if let Some(seed) = args.seed {
device.set_seed(seed)?;
}
let dtype = device.bf16_default_to_f32();
let img = match decode_only {
None => {
let t5_emb = {
let repo = api.repo(hf_hub::Repo::with_revision(
"google/t5-v1_1-xxl".to_string(),
hf_hub::RepoType::Model,
"refs/pr/2".to_string(),
));
let model_file = repo.get("model.safetensors")?;
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
let config_filename = repo.get("config.json")?;
let config = std::fs::read_to_string(config_filename)?;
let config: t5::Config = serde_json::from_str(&config)?;
let mut model = t5::T5EncoderModel::load(vb, &config)?;
let tokenizer_filename = api
.model("lmz/mt5-tokenizers".to_string())
.get("t5-v1_1-xxl.tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let mut tokens = tokenizer
.encode(prompt.as_str(), true)
.map_err(E::msg)?
.get_ids()
.to_vec();
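// The T5 conditioning uses a fixed sequence length of 256 tokens: shorter
// prompts are padded with id 0 and longer ones are truncated.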
tokens.resize(256, 0);
let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?;
println!("{input_token_ids}");
model.forward(&input_token_ids)?
};
println!("T5\n{t5_emb}");
let clip_emb = {
let repo = api.repo(hf_hub::Repo::model(
"openai/clip-vit-large-patch14".to_string(),
));
let model_file = repo.get("model.safetensors")?;
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
let config = clip::text_model::ClipTextConfig {
vocab_size: 49408,
projection_dim: 768,
activation: clip::text_model::Activation::QuickGelu,
intermediate_size: 3072,
embed_dim: 768,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 12,
};
let model =
clip::text_model::ClipTextTransformer::new(vb.pp("text_model"), &config)?;
let tokenizer_filename = repo.get("tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let tokens = tokenizer
.encode(prompt.as_str(), true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?;
println!("{input_token_ids}");
model.forward(&input_token_ids)?
};
println!("CLIP\n{clip_emb}");
let img = {
let cfg = match model {
Model::Dev => flux::model::Config::dev(),
Model::Schnell => flux::model::Config::schnell(),
};
let img = flux::sampling::get_noise(1, height, width, &device)?.to_dtype(dtype)?;
let state = if quantized {
flux::sampling::State::new(
&t5_emb.to_dtype(candle::DType::F32)?,
&clip_emb.to_dtype(candle::DType::F32)?,
&img.to_dtype(candle::DType::F32)?,
)?
} else {
flux::sampling::State::new(&t5_emb, &clip_emb, &img)?
};
let timesteps = match model {
Model::Dev => {
flux::sampling::get_schedule(50, Some((state.img.dim(1)?, 0.5, 1.15)))
}
Model::Schnell => flux::sampling::get_schedule(4, None),
};
println!("{state:?}");
println!("{timesteps:?}");
if quantized {
let model_file = match model {
Model::Schnell => api
.repo(hf_hub::Repo::model("lmz/candle-flux".to_string()))
.get("flux1-schnell.gguf")?,
Model::Dev => todo!(),
};
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
model_file, &device,
)?;
let model = flux::quantized_model::Flux::new(&cfg, vb)?;
flux::sampling::denoise(
&model,
&state.img,
&state.img_ids,
&state.txt,
&state.txt_ids,
&state.vec,
×teps,
4.,
)?
.to_dtype(dtype)?
} else {
let model_file = match model {
Model::Schnell => bf_repo.get("flux1-schnell.safetensors")?,
Model::Dev => bf_repo.get("flux1-dev.safetensors")?,
};
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)?
};
let model = flux::model::Flux::new(&cfg, vb)?;
flux::sampling::denoise(
&model,
&state.img,
&state.img_ids,
&state.txt,
&state.txt_ids,
&state.vec,
×teps,
4.,
)?
}
};
flux::sampling::unpack(&img, height, width)?
}
Some(file) => {
let mut st = candle::safetensors::load(file, &device)?;
st.remove("img").unwrap().to_dtype(dtype)?
}
};
println!("latent img\n{img}");
let img = {
let model_file = bf_repo.get("ae.safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
let cfg = match model {
Model::Dev => flux::autoencoder::Config::dev(),
Model::Schnell => flux::autoencoder::Config::schnell(),
};
let model = flux::autoencoder::AutoEncoder::new(&cfg, vb)?;
model.decode(&img)?
};
println!("img\n{img}");
let img = ((img.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?;
let filename = match args.seed {
None => "out.jpg".to_string(),
Some(s) => format!("out-{s}.jpg"),
};
candle_examples::save_image(&img.i(0)?, filename)?;
Ok(())
}
fn main() -> Result<()> {
let args = Args::parse();
#[cfg(feature = "cuda")]
candle::quantized::cuda::set_force_dmmv(args.use_dmmv);
run(args)
}
| candle/candle-examples/examples/flux/main.rs/0 | {
"file_path": "candle/candle-examples/examples/flux/main.rs",
"repo_id": "candle",
"token_count": 5445
} | 38 |
# candle-llama
Candle implementations of various Llama-based architectures.
## Running an example
```bash
$ cargo run --example llama -- --prompt "Machine learning is " --which v32-3b-instruct
> Machine learning is the part of computer science which deals with the development of algorithms and
```
| candle/candle-examples/examples/llama/README.md/0 | {
"file_path": "candle/candle-examples/examples/llama/README.md",
"repo_id": "candle",
"token_count": 78
} | 39 |
# candle-marian-mt
`marian-mt` is a neural machine translation model. In this example it is used to
translate text from French to English. See the associated [model
card](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en) for details on
the model itself.
## Running an example
```bash
cargo run --example marian-mt --release -- \
--text "Demain, dès l'aube, à l'heure où blanchit la campagne, Je partirai. Vois-tu, je sais que tu m'attends. J'irai par la forêt, j'irai par la montagne. Je ne puis demeurer loin de toi plus longtemps."
```
```
<NIL> Tomorrow, at dawn, at the time when the country is whitening, I will go. See,
I know you are waiting for me. I will go through the forest, I will go through the
mountain. I cannot stay far from you any longer.</s>
```
### Changing model and language pairs
```bash
$ cargo run --example marian-mt --release -- --text "hello, how are you." --which base --language-pair en-zh
你好,你好吗?
```
## Generating the tokenizer.json files
The tokenizer for each `marian-mt` model was trained independently,
meaning each new model needs unique tokenizer encoders and decoders.
You can use the `./python/convert_slow_tokenizer.py` script in this directory to generate
the `tokenizer.json` config files from the hf-hub repos.
The script requires all the packages in `./python/requirements.txt` or `./python/uv.lock`
to be installed, and has only been tested with `python 3.12.7`.
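
A minimal sketch of the workflow (paths are relative to this example's directory;
the exact invocation is an assumption, so check the script source for the
arguments it expects):

```bash
# Install the conversion script's dependencies first.
pip install -r python/requirements.txt
# Hypothetical invocation; see the script itself for its actual CLI.
python python/convert_slow_tokenizer.py
```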
| candle/candle-examples/examples/marian-mt/README.md/0 | {
"file_path": "candle/candle-examples/examples/marian-mt/README.md",
"repo_id": "candle",
"token_count": 471
} | 40 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Error as E;
use clap::{Parser, ValueEnum};
use candle::{DType, Device, Tensor};
use candle_nn::{ops::softmax, VarBuilder};
use candle_transformers::models::mobileclip;
use tokenizers::Tokenizer;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
S1,
S2,
}
impl Which {
fn model_name(&self) -> String {
let name = match self {
Self::S1 => "S1",
Self::S2 => "S2",
};
format!("apple/MobileCLIP-{name}-OpenCLIP")
}
fn config(&self) -> mobileclip::MobileClipConfig {
match self {
Self::S1 => mobileclip::MobileClipConfig::s1(),
Self::S2 => mobileclip::MobileClipConfig::s2(),
}
}
}
#[derive(Parser)]
struct Args {
#[arg(long, use_value_delimiter = true)]
images: Option<Vec<String>>,
#[arg(long)]
cpu: bool,
/// Use the pytorch weights rather than the safetensors ones
#[arg(long)]
use_pth: bool,
#[arg(long, use_value_delimiter = true)]
sequences: Option<Vec<String>>,
#[arg(value_enum, long, default_value_t=Which::S1)]
which: Which,
}
fn load_images<T: AsRef<std::path::Path>>(
paths: &Vec<T>,
image_size: usize,
) -> anyhow::Result<Tensor> {
let mut images = vec![];
for path in paths {
let tensor = candle_examples::imagenet::load_image_with_std_mean(
path,
image_size,
&[0.0, 0.0, 0.0],
&[1.0, 1.0, 1.0],
)?;
images.push(tensor);
}
let images = Tensor::stack(&images, 0)?;
Ok(images)
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let model_name = args.which.model_name();
let api = hf_hub::api::sync::Api::new()?;
let api = api.model(model_name);
let model_file = if args.use_pth {
api.get("open_clip_pytorch_model.bin")?
} else {
api.get("open_clip_model.safetensors")?
};
let tokenizer = api.get("tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?;
let config = &args.which.config();
let device = candle_examples::device(args.cpu)?;
let vec_imgs = match args.images {
Some(imgs) => imgs,
None => vec![
"candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(),
"candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(),
],
};
let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?;
let vb = if args.use_pth {
VarBuilder::from_pth(&model_file, DType::F32, &device)?
} else {
unsafe {
VarBuilder::from_mmaped_safetensors(
std::slice::from_ref(&model_file),
DType::F32,
&device,
)?
}
};
let model = mobileclip::MobileClipModel::new(vb, config)?;
let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?;
let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?;
let softmax_image = softmax(&logits_per_image, 1)?;
let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?;
println!("softmax_image_vec: {softmax_image_vec:?}");
let probability_vec = softmax_image_vec
.iter()
.map(|v| v * 100.0)
.collect::<Vec<f32>>();
let probability_per_image = probability_vec.len() / vec_imgs.len();
for (i, img) in vec_imgs.iter().enumerate() {
let start = i * probability_per_image;
let end = start + probability_per_image;
let prob = &probability_vec[start..end];
println!("\n\nResults for image: {img}\n");
for (i, p) in prob.iter().enumerate() {
println!("Probability: {:.4}% Text: {}", p, vec_seq[i]);
}
}
Ok(())
}
pub fn tokenize_sequences(
sequences: Option<Vec<String>>,
tokenizer: &Tokenizer,
device: &Device,
) -> anyhow::Result<(Tensor, Vec<String>)> {
// let pad_id = *tokenizer
// .get_vocab(true)
// .get("<|endoftext|>")
// .ok_or(E::msg("No pad token"))?;
// The model does not work well if the text is padded using the <|endoftext|>
// token, so 0 is used instead, as in the original OpenCLIP code.
let pad_id = 0;
let vec_seq = match sequences {
Some(seq) => seq,
None => vec![
"a cycling race".to_string(),
"a photo of two cats".to_string(),
"a robot holding a candle".to_string(),
],
};
let mut tokens = vec![];
for seq in vec_seq.clone() {
let encoding = tokenizer.encode(seq, true).map_err(E::msg)?;
tokens.push(encoding.get_ids().to_vec());
}
let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0);
// Pad the sequences to have the same length
for token_vec in tokens.iter_mut() {
let len_diff = max_len - token_vec.len();
if len_diff > 0 {
token_vec.extend(vec![pad_id; len_diff]);
}
}
let input_ids = Tensor::new(tokens, device)?;
Ok((input_ids, vec_seq))
}
| candle/candle-examples/examples/mobileclip/main.rs/0 | {
"file_path": "candle/candle-examples/examples/mobileclip/main.rs",
"repo_id": "candle",
"token_count": 2389
} | 41 |
## Using ONNX models in Candle
This example demonstrates how to run [ONNX](https://github.com/onnx/onnx)-based LLM models in Candle.
Only SmolLM-135M is implemented at the moment.
You can run the example with the following command:
```bash
cargo run --example onnx-llm --features onnx
``` | candle/candle-examples/examples/onnx-llm/README.md/0 | {
"file_path": "candle/candle-examples/examples/onnx-llm/README.md",
"repo_id": "candle",
"token_count": 94
} | 42 |
# candle-quantized-gemma
Candle implementation of quantized Gemma.
## Running an example
```bash
$ cargo run --example quantized-gemma -- --prompt "Write a function to calculate fibonacci numbers. "
> ```python
> def fibonacci(n):
> """Calculates the nth Fibonacci number using recursion."""
> if n <= 1:
> return n
> else:
> return fibonacci(n-1) + fibonacci(n-2
> ```
``` | candle/candle-examples/examples/quantized-gemma/README.md/0 | {
"file_path": "candle/candle-examples/examples/quantized-gemma/README.md",
"repo_id": "candle",
"token_count": 159
} | 43 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle_transformers::models::quantized_recurrent_gemma::Model as QModel;
use candle_transformers::models::recurrent_gemma::{Config, Model as BModel};
use candle::{DType, Device, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
enum Model {
B(BModel),
Q(QModel),
}
impl Model {
fn forward(&mut self, xs: &Tensor, pos: usize) -> candle::Result<Tensor> {
match self {
Self::B(m) => m.forward(xs, pos),
Self::Q(m) => m.forward(xs, pos),
}
}
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "2b")]
Base2B,
#[value(name = "2b-it")]
Instruct2B,
}
struct TextGeneration {
model: Model,
device: Device,
tokenizer: TokenOutputStream,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
top_k: usize,
repeat_penalty: f32,
repeat_last_n: usize,
device: &Device,
) -> Self {
let sampling = match temp {
None => candle_transformers::generation::Sampling::ArgMax,
Some(temperature) => match top_p {
None => candle_transformers::generation::Sampling::TopK {
temperature,
k: top_k,
},
Some(top_p) => candle_transformers::generation::Sampling::TopKThenTopP {
temperature,
k: top_k,
p: top_p,
},
},
};
let logits_processor = LogitsProcessor::from_sampling(seed, sampling);
Self {
model,
tokenizer: TokenOutputStream::new(tokenizer),
logits_processor,
repeat_penalty,
repeat_last_n,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
self.tokenizer.clear();
let mut tokens = self
.tokenizer
.tokenizer()
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
for &t in tokens.iter() {
if let Some(t) = self.tokenizer.next_token(t)? {
print!("{t}")
}
}
std::io::stdout().flush()?;
let mut generated_tokens = 0usize;
let eos_token = match self.tokenizer.get_token("<eos>") {
Some(token) => token,
None => anyhow::bail!("cannot find the <eos> token"),
};
let start_gen = std::time::Instant::now();
for index in 0..sample_len {
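// Only the full prompt is fed on the first step; afterwards a single newly
// sampled token is passed in together with its position, as the model keeps
// its state across calls.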
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input, start_pos)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token {
break;
}
if let Some(t) = self.tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
let dt = start_gen.elapsed();
if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
#[arg(long, default_value_t = 250)]
top_k: usize,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 8000)]
sample_len: usize,
#[arg(long)]
model_id: Option<String>,
#[arg(long, default_value = "main")]
revision: String,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
config_file: Option<String>,
#[arg(long)]
weight_files: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model to use.
#[arg(long, default_value = "2b")]
which: Which,
#[arg(long)]
quantized: bool,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = Api::new()?;
let model_id = match &args.model_id {
Some(model_id) => model_id.to_string(),
None => match args.which {
Which::Base2B => "google/recurrentgemma-2b".to_string(),
Which::Instruct2B => "google/recurrentgemma-2b-it".to_string(),
},
};
let repo = api.repo(Repo::with_revision(
model_id,
RepoType::Model,
args.revision,
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let config_filename = match args.config_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("config.json")?,
};
let filenames = match args.weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
if args.quantized {
let filename = match args.which {
Which::Base2B => "recurrent-gemma-2b-q4k.gguf",
Which::Instruct2B => "recurrent-gemma-7b-q4k.gguf",
};
let filename = api.model("lmz/candle-gemma".to_string()).get(filename)?;
vec![filename]
} else {
candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?
}
}
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let dtype = if device.is_cuda() {
DType::BF16
} else {
DType::F32
};
let model = if args.quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
&filenames[0],
&device,
)?;
Model::Q(QModel::new(&config, vb.pp("model"))?)
} else {
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
Model::B(BModel::new(&config, vb.pp("model"))?)
};
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.top_k,
args.repeat_penalty,
args.repeat_last_n,
&device,
);
pipeline.run(&args.prompt, args.sample_len)?;
Ok(())
}
| candle/candle-examples/examples/recurrent-gemma/main.rs/0 | {
"file_path": "candle/candle-examples/examples/recurrent-gemma/main.rs",
"repo_id": "candle",
"token_count": 4698
} | 44 |
## candle-rwkv
The [RWKV model](https://wiki.rwkv.com/) is a recurrent neural network model
with performance on par with transformer architectures. Several variants are
available; candle implements the v5 and v6 versions, which can be used with
Eagle 7B ([blog post](https://blog.rwkv.com/p/eagle-7b-soaring-past-transformers)).
```bash
$ cargo run --example rwkv --release -- --prompt "The smallest prime is "
avx: true, neon: false, simd128: false, f16c: true
temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
The smallest prime is ϕ(2) = 2.
The smallest composite is ϕ(3) = 3.
The smallest perfect number is ϕ(5) = 5.
The smallest perfect square is ϕ(4) = 4.
The smallest perfect cube is ϕ(6) = 6.
```
| candle/candle-examples/examples/rwkv/README.md/0 | {
"file_path": "candle/candle-examples/examples/rwkv/README.md",
"repo_id": "candle",
"token_count": 235
} | 45 |
# candle-splade
SPLADE is a neural retrieval model which learns query/document sparse expansion via the BERT MLM head and sparse regularization. Sparse representations have several advantages over dense approaches: efficient use of inverted indexes, explicit lexical matching, and interpretability. They also seem to generalize better on out-of-domain data. In this example we can do the following two tasks:
- Compute sparse embedding for a given query.
- Compute similarities between a set of sentences using sparse embeddings.
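
As a rough sketch (not the exact code used in this example), the sparse embedding is typically obtained by applying `log(1 + ReLU(logits))` to the MLM-head logits and max-pooling over the sequence dimension; the helper below assumes a `logits` tensor of shape `(batch, seq_len, vocab)`:

```rust
use candle::{Result, Tensor};

// Hypothetical helper: turn MLM-head logits into SPLADE sparse embeddings.
fn splade_pool(logits: &Tensor) -> Result<Tensor> {
    // log(1 + relu(x)) keeps activations sparse and non-negative.
    let activated = (logits.relu()? + 1.0)?.log()?;
    // Max-pool over the sequence dimension -> (batch, vocab).
    activated.max(1)
}
```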
## Sparse Sentence embeddings
SPLADE is used to compute the sparse embedding for a given query. The model weights
are downloaded from the hub on the first run. This makes use of the BertForMaskedLM model.
```bash
cargo run --example splade --release -- --prompt "Here is a test sentence"
> "the out there still house inside position outside stay standing hotel sitting dog animal sit bird cat statue cats"
> [0.10270107, 0.269471, 0.047469813, 0.0016636598, 0.05394874, 0.23105666, 0.037475716, 0.45949644, 0.009062732, 0.06790692, 0.0327835, 0.33122346, 0.16863061, 0.12688516, 0.340983, 0.044972017, 0.47724655, 0.01765311, 0.37331146]
```
```bash
cargo run --example splade --release --features
> score: 0.47 'The new movie is awesome' 'The new movie is so great'
> score: 0.43 'The cat sits outside' 'The cat plays in the garden'
> score: 0.14 'I love pasta' 'Do you like pizza?'
> score: 0.11 'A man is playing guitar' 'The cat plays in the garden'
> score: 0.05 'A man is playing guitar' 'A woman watches TV'
```
| candle/candle-examples/examples/splade/README.md/0 | {
"file_path": "candle/candle-examples/examples/splade/README.md",
"repo_id": "candle",
"token_count": 474
} | 46 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::path::Path;
use anyhow::{anyhow, Error as E, Result};
use clap::Parser;
use candle_transformers::models::stella_en_v5::{
Config, EmbedDim as StellaEmbedDim, EmbeddingModel,
};
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use hf_hub::{api::sync::Api, Repo};
use tokenizers::{PaddingDirection, PaddingParams, PaddingStrategy, Tokenizer};
struct Embedding {
model: EmbeddingModel,
device: Device,
tokenizer: Tokenizer,
}
impl Embedding {
fn new(model: EmbeddingModel, tokenizer: Tokenizer, device: &Device) -> Self {
Self {
model,
tokenizer,
device: device.clone(),
}
}
fn encode(&mut self, task: EncodeTask, text: Option<String>) -> Result<()> {
        // Just showcasing embeddings; this has no real value
if let Some(text) = text {
let qry = task.query_preproc(&[text]);
let encoding = self.tokenizer.encode(qry, true).map_err(|e| anyhow!(e))?;
let shape = (1, encoding.len());
let input = Tensor::from_slice(encoding.get_ids(), shape, &self.device)?;
let mask = Tensor::from_slice(encoding.get_attention_mask(), shape, &self.device)?;
let result = self.model.forward(&input, &mask)?;
println!("embeddings: {result}");
} else {
// Examples copied from [Model Card](https://huggingface.co/dunzhang/stella_en_1.5B_v5#transformers)
let queries = [
"What are some ways to reduce stress?".to_string(),
"What are the benefits of drinking green tea?".to_string(),
];
let docs = [
"There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.".to_string(),
"Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.".to_string(),
];
            // Only the queries get the instruction template; the docs are encoded as-is
let qry = task.query_preproc(&queries);
let mut qry_encoded = self
.tokenizer
.encode_batch(qry, true)
.map_err(|e| anyhow!(e))?;
let mut docs_encoded = self
.tokenizer
.encode_batch(docs.to_vec(), true)
.map_err(|e| anyhow!(e))?;
let qry_embed = {
// Now, we generate the tensors for the `input` and `mask`
let shape = (qry_encoded.len(), qry_encoded[1].len());
let mut ids = Tensor::zeros(shape, DType::U32, &self.device)?;
let mut masks = Tensor::zeros(shape, DType::U8, &self.device)?;
for (i, e) in qry_encoded.drain(..).enumerate() {
let input_id =
Tensor::from_iter(e.get_ids().to_vec(), &self.device)?.unsqueeze(0)?;
let mask = Tensor::from_iter(e.get_attention_mask().to_vec(), &self.device)?
.to_dtype(DType::U8)?
.unsqueeze(0)?;
ids =
ids.slice_assign(&[i..i + 1, 0..input_id.dims2().unwrap().1], &input_id)?;
masks = masks.slice_assign(&[i..i + 1, 0..mask.dims2().unwrap().1], &mask)?;
}
                // Let's generate the embeddings for the queries, normalizing the result.
                // For larger datasets, you can call `.forward()` on batches and run an `l2 norm` pass on the entire data
self.model.forward_norm(&ids, &masks)?
};
let doc_embed = {
let shape = (docs_encoded.len(), docs_encoded[1].len());
let mut ids = Tensor::zeros(shape, DType::U32, &self.device)?;
let mut masks = Tensor::zeros(shape, DType::U8, &self.device)?;
for (i, e) in docs_encoded.drain(..).enumerate() {
let input_id =
Tensor::from_iter(e.get_ids().to_vec(), &self.device)?.unsqueeze(0)?;
let mask = Tensor::from_iter(e.get_attention_mask().to_vec(), &self.device)?
.to_dtype(DType::U8)?
.unsqueeze(0)?;
ids =
ids.slice_assign(&[i..i + 1, 0..input_id.dims2().unwrap().1], &input_id)?;
masks = masks.slice_assign(&[i..i + 1, 0..mask.dims2().unwrap().1], &mask)?;
}
                // Let's generate the embeddings for the docs, normalizing the result.
                // For larger datasets, you can call `.forward()` on batches and run an `l2 norm` pass on the entire data
self.model.forward_norm(&ids, &masks)?
};
println!(
"Embed shapes:\nQuery: {:?}\nDocs: {:?}",
qry_embed.shape(),
doc_embed.shape()
); // [2, 1024] for head dim `1024`
// a matmul to generate the `similarity` score
let res = qry_embed.matmul(&doc_embed.t()?)?;
for (k, v) in queries.iter().enumerate() {
let tnsr = res.get(k)?;
let max = tnsr.argmax(0)?.to_scalar::<u32>()?;
println!(
"\nScore: {}\nQuery: {}\nAnswer: {}\n\n",
tnsr.get(max as usize)?.to_scalar::<f32>()?,
v,
docs[k]
);
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)]
enum EmbedDim {
#[value(name = "256")]
Dim256,
#[value(name = "768")]
Dim768,
#[value(name = "1024")]
Dim1024,
#[value(name = "2048")]
Dim2048,
#[value(name = "4096")]
Dim4096,
#[value(name = "6144")]
Dim6144,
#[value(name = "8192")]
Dim8192,
}
impl EmbedDim {
    /// Returns the dir path to the embed head weights in the repo
pub fn embed_dim_default_dir(&self) -> &'static str {
match self {
Self::Dim256 => "2_Dense_256",
Self::Dim768 => "2_Dense_768",
Self::Dim1024 => "2_Dense_1024",
Self::Dim2048 => "2_Dense_2048",
Self::Dim4096 => "2_Dense_4096",
Self::Dim6144 => "2_Dense_6144",
Self::Dim8192 => "2_Dense_8192",
}
}
/// Resolves the `EmbedDim` for given variant
pub fn embed_dim(&self) -> StellaEmbedDim {
match self {
Self::Dim256 => StellaEmbedDim::Dim256,
Self::Dim768 => StellaEmbedDim::Dim768,
Self::Dim1024 => StellaEmbedDim::Dim1024,
Self::Dim2048 => StellaEmbedDim::Dim2048,
Self::Dim4096 => StellaEmbedDim::Dim4096,
Self::Dim6144 => StellaEmbedDim::Dim6144,
Self::Dim8192 => StellaEmbedDim::Dim8192,
}
}
}
#[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)]
pub enum EncodeTask {
/// `s2p` is the `retrieval` task
/// Default in this example
#[value(name = "s2p")]
S2P,
/// `s2s` is the semantic similarity task
#[value(name = "s2s")]
S2S,
}
impl EncodeTask {
    /// Preprocess a set of inputs based on a template suggested by the model authors
/// See: https://huggingface.co/dunzhang/stella_en_1.5B_v5#introduction
pub fn query_preproc(&self, txt: &[String]) -> Vec<String> {
let instruct = match self {
Self::S2P => {
"Given a web search query, retrieve relevant passages that answer the query."
}
Self::S2S => "Retrieve semantically similar text.",
};
txt.iter()
.map(|s| format!("Instruct: {instruct}\nQuery: {s}"))
.collect::<Vec<_>>()
}
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "1.5b")]
Large,
#[value(name = "400m")]
Small,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(long)]
which: Which,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
use_flash_attn: bool,
#[arg(long)]
query: Option<String>,
#[arg(long, default_value = "1024")]
embed_dim: Option<EmbedDim>,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
base_weight_files: Option<String>,
#[arg(long)]
embed_head_weight_files: Option<String>,
/// `Stella` is trained on 2 tasks: See [`Model Card`](https://huggingface.co/dunzhang/stella_en_1.5B_v5)
/// `s2s`: Semantic textual similarity
/// `s2p`: Retrieval task - `Default` in this example
#[arg(long, default_value = "s2p")]
task: Option<EncodeTask>,
}
// Tokenizer creation is super critical in our case.
// For the 1.5B variant we pad each batch to the `left`; the 400M variant uses the usual right padding
fn create_tokenizer(tokenizer_file: &Path, which: Which) -> Result<Tokenizer> {
let mut tokenizer = Tokenizer::from_file(tokenizer_file).map_err(E::msg)?;
if which == Which::Large {
let pad_id = if let Some(pad_id) = tokenizer.token_to_id("<|endoftext|>") {
pad_id
} else {
return Err(anyhow!(
"Tokenizer doesn't contain expected `<|endoftext|>` token"
));
};
        // This part is super important: we pad the tokens to the *`left`* and not the usual *`right`*
tokenizer.with_padding(Some(PaddingParams {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Left,
pad_id,
pad_token: "<|endoftext|>".to_string(),
..Default::default()
}));
} else {
tokenizer.with_padding(Some(PaddingParams {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Right,
..Default::default()
}));
}
Ok(tokenizer)
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let start = std::time::Instant::now();
let api = Api::new()?;
let embed_dim = match args.embed_dim {
Some(d) => d,
None => EmbedDim::Dim1024,
};
let (repo, cfg) = match args.which {
Which::Large => (
"dunzhang/stella_en_1.5B_v5",
Config::new_1_5_b_v5(embed_dim.embed_dim()),
),
Which::Small => (
"dunzhang/stella_en_400M_v5",
Config::new_400_m_v5(embed_dim.embed_dim()),
),
};
let repo = api.repo(Repo::model(repo.to_string()));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
    // Note: if you are providing `weight_files`, ensure that the `--embed_dim` dimension provided matches the weights
// E.g. if you are using `--embed_dim 1024`, the weight files should include the `.safetensors` file from `2_Dense_1024` dir of the repo
let base_weight_files = match args.base_weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
vec![repo.get("model.safetensors")?]
}
};
let embed_weight_files = match args.embed_head_weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
let head_w_path = format!("{}/model.safetensors", embed_dim.embed_dim_default_dir());
vec![repo.get(&head_w_path)?]
}
};
println!("retrieved the files in {:?}", start.elapsed());
    // Initialize the tokenizer, which requires adding padding to the `left` for batch encoding
let tokenizer = create_tokenizer(tokenizer_filename.as_path(), args.which)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let dtype = DType::F32;
let base_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&base_weight_files, dtype, &device)? };
// Embedding layer is always built on F32 for accuracy
let embed_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&embed_weight_files, DType::F32, &device)? };
let model = EmbeddingModel::new(&cfg, base_vb, embed_vb)?;
println!("loaded the model in {:?}", start.elapsed());
let mut embedding = Embedding::new(model, tokenizer, &device);
let task = args.task.map_or(EncodeTask::S2P, |t| t);
embedding.encode(task, args.query)
}
| candle/candle-examples/examples/stella-en-v5/main.rs/0 | {
"file_path": "candle/candle-examples/examples/stella-en-v5/main.rs",
"repo_id": "candle",
"token_count": 6512
} | 47 |
use std::path::PathBuf;
use anyhow::{Context, Error, Result};
use byteorder::{LittleEndian, ReadBytesExt};
use candle::{utils, DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::voxtral;
use candle_transformers::models::voxtral::{
VoxtralCache, VoxtralConfig, VoxtralEncoderConfig, VoxtralForConditionalGeneration,
VoxtralGenerationConfig, VoxtralLlamaConfig as LlamaConfig,
};
use serde_json;
use std::io::Cursor;
use tekken::Tekkenizer;
use super::download;
const SAMPLE_RATE: u32 = 16000;
#[derive(Debug, serde::Serialize)]
pub struct TranscriptionResult {
pub text: String,
pub tokens: Vec<u32>,
}
pub struct VoxtralModel {
model: VoxtralForConditionalGeneration,
tokenizer: Tekkenizer,
device: Device,
audio_token_id: usize,
cache: VoxtralCache,
}
impl VoxtralModel {
/// # Errors
///
/// Returns an error if the model cannot be loaded.
pub fn new(model_id: &str, use_cpu: bool) -> Result<Self> {
// Determine device
let device = if !use_cpu && utils::cuda_is_available() {
Device::new_cuda(0).context("Failed to create CUDA device")?
} else {
Device::Cpu
};
let (model_files, tokenizer_file) = download::model_files(model_id)?;
// Load model configuration
let config = load_model_config(&model_files.0)?;
// Load safetensors files
let vb = load_model_weights(&model_files.1, &device)?;
// Create model
let model = VoxtralForConditionalGeneration::new(&config, vb)?;
// Load tokenizer
let tokenizer = Tekkenizer::from_file(tokenizer_file).map_err(Error::msg)?;
// Create cache
let cache = VoxtralCache::new(true, DType::F16, &config.text_config, &device)?;
let audio_token_id = config.audio_token_id;
Ok(Self {
model,
tokenizer,
device,
audio_token_id,
cache,
})
}
/// Transcribe audio and return both text and tokens
///
/// # Errors
///
/// Returns an error if the audio data cannot be transcribed.
pub fn transcribe_audio(
&mut self,
audio_data: &[f32],
sample_rate: u32,
) -> Result<TranscriptionResult> {
// Resample to 16kHz if needed
let audio = if sample_rate == SAMPLE_RATE {
audio_data.to_vec()
} else {
candle_examples::audio::resample(audio_data, sample_rate, SAMPLE_RATE)
.context("Failed to resample audio")?
};
// Pad audio to multiple of 480000 samples before feature extraction
let chunk_size = 480000; // 30 seconds * 16000 Hz
let padded_audio = if audio.len() % chunk_size != 0 {
// Pad to next multiple of chunk_size
let target_samples = ((audio.len() / chunk_size) + 1) * chunk_size;
let mut padded = audio.clone();
padded.resize(target_samples, 0.0); // Pad with zeros
padded
} else {
audio
};
// Use the 128-mel filter bank
let mel_bytes = include_bytes!("melfilters128.bytes");
let mut mel_filters = vec![0f32; mel_bytes.len() / 4];
let mut cursor = Cursor::new(mel_bytes);
cursor.read_f32_into::<LittleEndian>(&mut mel_filters)?;
        let audio_features =
            voxtral::extract_features(&padded_audio, &mel_filters, self.device())?;
let (result, tokens) = transcribe_with_voxtral(
&self.model,
&self.tokenizer,
&audio_features,
&self.audio_token_id,
&self.device,
&self.cache.clone(),
)?;
Ok(TranscriptionResult {
text: result,
tokens,
})
}
pub fn device(&self) -> &Device {
&self.device
}
}
fn transcribe_with_voxtral(
model: &VoxtralForConditionalGeneration,
tokenizer: &Tekkenizer,
audio_features: &Tensor,
audio_token_id: &usize,
device: &Device,
cache: &VoxtralCache,
) -> Result<(String, Vec<u32>)> {
// Validate audio features shape
let audio_dims = audio_features.dims();
if audio_dims.len() != 3 {
return Err(anyhow::anyhow!(
"Audio features must be 3D tensor (batch, mels, time), got shape: {:?}",
audio_dims
));
}
if audio_dims[1] != 128 {
return Err(anyhow::anyhow!(
"Audio features must have 128 mel bins, got {}",
audio_dims[1]
));
}
    // Create the exact token sequence that the HuggingFace processor generates
let mut input_tokens = Vec::new();
// Pattern: <s>[INST][BEGIN_AUDIO][AUDIO]*N[/INST]lang:en[TRANSCRIBE]
input_tokens.push(1u32); // BOS: <s>
input_tokens.push(3u32); // [INST]
input_tokens.push(25u32); // [BEGIN_AUDIO]
// Calculate number of audio tokens to match Python exactly: 7 chunks × 375 tokens = 2625
let batch_size = audio_features.dim(0)?; // Number of chunks (should be 7)
// Python uses exactly 375 tokens per 3000-frame chunk
let tokens_per_chunk = 375; // Fixed value from Python analysis
let num_audio_tokens = batch_size * tokens_per_chunk;
// Add AUDIO tokens
for _ in 0..num_audio_tokens {
input_tokens.push(*audio_token_id as u32); // [AUDIO] token (24)
}
input_tokens.push(4u32); // [/INST]
input_tokens.push(9909u32); // lang
input_tokens.push(1058u32); // :
input_tokens.push(1262u32); // en
input_tokens.push(34u32); // [TRANSCRIBE]
let input_len = input_tokens.len();
let input_ids = Tensor::new(input_tokens, device)?.unsqueeze(0)?;
// Generate response using the model (match Python parameters)
let generation_config = VoxtralGenerationConfig {
max_new_tokens: 1000, // max_new_tokens
temperature: 0.0, // temperature=0 for deterministic generation
top_p: None,
device: device.clone(),
cache: Some(cache.clone()),
};
let generated_tokens = model
.generate(
&input_ids,
Some(audio_features), // Audio features will be processed and inserted at audio token position
generation_config,
)
.map_err(|e| {
println!("Generation error: {:?}", e);
println!("Error details: {:#}", e);
anyhow::anyhow!("Failed to generate tokens: {e}")
})?;
// Decode only the newly generated tokens (skip input prompt)
let new_tokens = if generated_tokens.len() > input_len {
&generated_tokens[input_len..]
} else {
&generated_tokens
};
let decoded_text = tokenizer
.decode(new_tokens, tekken::SpecialTokenPolicy::Ignore)
.map_err(|e| anyhow::anyhow!("Failed to decode tokens: {}", e))?;
// Return both transcription and tokens
Ok((decoded_text, new_tokens.to_vec()))
}
/// Load model weights from safetensors files
fn load_model_weights<'a>(model_files: &'a [PathBuf], device: &Device) -> Result<VarBuilder<'a>> {
let dtype = DType::F16; // F16 for memory efficiency
    // MEMORY OPTIMIZATION: synchronize the device so pending work is flushed before loading
if let candle::Device::Cuda(_) = device {
device.synchronize()?;
}
// Use memory-mapped loading for efficiency (confirmed better than regular loading)
let vb = unsafe { VarBuilder::from_mmaped_safetensors(model_files, dtype, device)? };
    // MEMORY OPTIMIZATION: synchronize the device again once loading has completed
if let candle::Device::Cuda(_) = device {
device.synchronize()?;
}
Ok(vb)
}
/// Load model configuration from JSON file
fn load_model_config(config_file: &PathBuf) -> Result<VoxtralConfig> {
let config_str = std::fs::read_to_string(config_file)?;
// Parse the JSON configuration
let json: serde_json::Value =
serde_json::from_str(&config_str).context("Failed to parse config.json")?;
// Extract audio token ID (should be 24 based on config.json)
let audio_token_id = json
.get("audio_token_id")
.and_then(|v| v.as_u64())
.unwrap_or(24) as usize;
// Parse audio config from JSON
let audio_config = parse_audio_config(&json)?;
// Parse text config from JSON
let text_config = parse_text_config(&json)?;
// Get projector activation function
let projector_hidden_act = json
.get("projector_hidden_act")
.and_then(|v| v.as_str())
.unwrap_or("gelu")
.to_string();
Ok(VoxtralConfig {
audio_config,
text_config,
audio_token_id,
projector_hidden_act,
})
}
/// Parse audio encoder config from JSON
fn parse_audio_config(json: &serde_json::Value) -> Result<VoxtralEncoderConfig> {
let audio_json = json
.get("audio_config")
.ok_or_else(|| anyhow::anyhow!("Missing audio_config in configuration"))?;
Ok(VoxtralEncoderConfig {
vocab_size: audio_json
.get("vocab_size")
.and_then(|v| v.as_u64())
.unwrap_or(51866) as usize,
hidden_size: audio_json
.get("hidden_size")
.and_then(|v| v.as_u64())
.unwrap_or(1280) as usize,
num_hidden_layers: audio_json
.get("num_hidden_layers")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize,
num_attention_heads: audio_json
.get("num_attention_heads")
.and_then(|v| v.as_u64())
.unwrap_or(20) as usize,
num_key_value_heads: audio_json
.get("num_key_value_heads")
.and_then(|v| v.as_u64())
.unwrap_or(20) as usize,
intermediate_size: audio_json
.get("intermediate_size")
.and_then(|v| v.as_u64())
.unwrap_or(5120) as usize,
dropout: audio_json
.get("dropout")
.and_then(|v| v.as_f64())
.unwrap_or(0.0),
attention_dropout: audio_json
.get("attention_dropout")
.and_then(|v| v.as_f64())
.unwrap_or(0.0),
activation_dropout: audio_json
.get("activation_dropout")
.and_then(|v| v.as_f64())
.unwrap_or(0.0),
activation_function: audio_json
.get("activation_function")
.and_then(|v| v.as_str())
.unwrap_or("gelu")
.to_string(),
max_source_positions: audio_json
.get("max_source_positions")
.and_then(|v| v.as_u64())
.unwrap_or(1500) as usize,
layerdrop: audio_json
.get("layerdrop")
.and_then(|v| v.as_f64())
.unwrap_or(0.0),
initializer_range: audio_json
.get("initializer_range")
.and_then(|v| v.as_f64())
.unwrap_or(0.02),
scale_embedding: audio_json
.get("scale_embedding")
.and_then(|v| v.as_bool())
.unwrap_or(false),
num_mel_bins: audio_json
.get("num_mel_bins")
.and_then(|v| v.as_u64())
.unwrap_or(128) as usize,
head_dim: audio_json
.get("head_dim")
.and_then(|v| v.as_u64())
.unwrap_or(64) as usize,
})
}
/// Parse text model config from JSON
fn parse_text_config(json: &serde_json::Value) -> Result<LlamaConfig> {
let text_json = json
.get("text_config")
.ok_or_else(|| anyhow::anyhow!("Missing text_config in configuration"))?;
Ok(LlamaConfig {
vocab_size: text_json
.get("vocab_size")
.and_then(|v| v.as_u64())
.unwrap_or(131072) as usize,
hidden_size: text_json
.get("hidden_size")
.and_then(|v| v.as_u64())
.unwrap_or(3072) as usize,
intermediate_size: text_json
.get("intermediate_size")
.and_then(|v| v.as_u64())
.unwrap_or(8192) as usize,
num_hidden_layers: text_json
.get("num_hidden_layers")
.and_then(|v| v.as_u64())
.unwrap_or(30) as usize,
num_attention_heads: text_json
.get("num_attention_heads")
.and_then(|v| v.as_u64())
.unwrap_or(32) as usize,
num_key_value_heads: text_json
.get("num_key_value_heads")
.and_then(|v| v.as_u64())
.unwrap_or(8) as usize,
head_dim: text_json
.get("head_dim")
.and_then(|v| v.as_u64())
.map(|v| v as usize),
rms_norm_eps: text_json
.get("rms_norm_eps")
.and_then(|v| v.as_f64())
.unwrap_or(1e-5),
rope_theta: text_json
.get("rope_theta")
.and_then(|v| v.as_f64())
.unwrap_or(100_000_000.0) as f32,
max_position_embeddings: text_json
.get("max_position_embeddings")
.and_then(|v| v.as_u64())
.unwrap_or(131072) as usize,
use_flash_attn: false,
tie_word_embeddings: text_json
.get("attention_bias")
.and_then(|v| v.as_bool())
.unwrap_or(false),
})
}
| candle/candle-examples/examples/voxtral/model.rs/0 | {
"file_path": "candle/candle-examples/examples/voxtral/model.rs",
"repo_id": "candle",
"token_count": 6352
} | 48 |
/******************************************************************************
* Copyright (c) 2024, Tri Dao.
******************************************************************************/
#pragma once
#include <tuple>
#include <cstdio>
#if !defined(__CUDACC_RTC__)
#include "cuda_runtime.h"
#endif
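// Wrapping the body in do { ... } while (0) lets CHECK_CUDA be used like a
// single statement, e.g. inside an if/else without braces.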
#define CHECK_CUDA(call) \
do { \
cudaError_t status_ = call; \
if (status_ != cudaSuccess) { \
fprintf(stderr, "CUDA error (%s:%d): %s\n", __FILE__, __LINE__, \
cudaGetErrorString(status_)); \
exit(1); \
} \
} while (0)
inline int get_current_device() {
int device;
CHECK_CUDA(cudaGetDevice(&device));
return device;
}
inline std::tuple<int, int> get_compute_capability(int device) {
int capability_major, capability_minor;
CHECK_CUDA(cudaDeviceGetAttribute(&capability_major, cudaDevAttrComputeCapabilityMajor, device));
CHECK_CUDA(cudaDeviceGetAttribute(&capability_minor, cudaDevAttrComputeCapabilityMinor, device));
return {capability_major, capability_minor};
}
inline int get_num_sm(int device) {
int multiprocessor_count;
CHECK_CUDA(cudaDeviceGetAttribute(&multiprocessor_count, cudaDevAttrMultiProcessorCount, device));
return multiprocessor_count;
}
| candle/candle-flash-attn/kernels/hardware_info.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/hardware_info.h",
"repo_id": "candle",
"token_count": 854
} | 49 |
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=src/compatibility.cuh");
println!("cargo:rerun-if-changed=src/cuda_utils.cuh");
println!("cargo:rerun-if-changed=src/binary_op_macros.cuh");
let builder = bindgen_cuda::Builder::default();
println!("cargo:info={builder:?}");
let bindings = builder.build_ptx().unwrap();
bindings.write("src/ptx.rs").unwrap();
}
| candle/candle-kernels/build.rs/0 | {
"file_path": "candle/candle-kernels/build.rs",
"repo_id": "candle",
"token_count": 178
} | 50 |
#define _USE_MATH_DEFINES
#include<math.h>
#include<stdint.h>
#include "cuda_utils.cuh"
#define UNARY_OP(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ __forceinline__ T gelu_erf_fwd(T x) {
return x * normcdfg(x);
}
template<typename T>
__device__ __forceinline__ T gelu_fwd(T x) {
T x_sq = x * x;
T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube;
return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha));
}
template<typename T>
__device__ __forceinline__ T elu_fwd(T x, T alpha) {
if (x > static_cast<T>(0)) {
return x;
}
return alpha * (expg(x) - static_cast<T>(1));
}
template<typename T>
__device__ __forceinline__ T relu_fwd(T x) {
T zero = 0.;
return maxg(x, zero);
}
template<typename T>
__device__ __forceinline__ T silu_fwd(T x) {
return x / (static_cast<T>(1) + expg(-x));
}
template<typename T>
__device__ __forceinline__ T sigmoid_fwd(T x) {
return recipg(static_cast<T>(1) + expg(-x));
}
#define UNARY_OP1(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME param, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ T sign_(T t) {
return static_cast<T>(t > static_cast<T>(0)) - static_cast<T>(t < static_cast<T>(0));
}
#if __CUDA_ARCH__ >= 800
UNARY_OP(__nv_bfloat16, ucopy_bf16, x)
UNARY_OP(__nv_bfloat16, uneg_bf16, -x)
UNARY_OP(__nv_bfloat16, urecip_bf16, recipg(x))
UNARY_OP(__nv_bfloat16, uexp_bf16, expg(x))
UNARY_OP(__nv_bfloat16, ulog_bf16, logg(x))
UNARY_OP(__nv_bfloat16, usin_bf16, sing(x))
UNARY_OP(__nv_bfloat16, ucos_bf16, cosg(x))
UNARY_OP(__nv_bfloat16, utanh_bf16, tanhg(x))
UNARY_OP(__nv_bfloat16, uerf_bf16, erfg(x))
UNARY_OP(__nv_bfloat16, uceil_bf16, ceilg(x))
UNARY_OP(__nv_bfloat16, ufloor_bf16, floorg(x))
UNARY_OP(__nv_bfloat16, uround_bf16, roundg(x))
UNARY_OP(__nv_bfloat16, unormcdf_bf16, normcdfg(x))
UNARY_OP(__nv_bfloat16, uabs_bf16, absg(x))
UNARY_OP(__nv_bfloat16, usqr_bf16, x*x)
UNARY_OP(__nv_bfloat16, usqrt_bf16, sqrtg(x))
UNARY_OP(__nv_bfloat16, ugelu_bf16, gelu_fwd(x))
UNARY_OP(__nv_bfloat16, ugelu_erf_bf16, gelu_erf_fwd(x))
UNARY_OP(__nv_bfloat16, urelu_bf16, relu_fwd(x))
UNARY_OP1(__nv_bfloat16, uelu_bf16, elu_fwd(x, param))
UNARY_OP(__nv_bfloat16, usilu_bf16, silu_fwd(x))
UNARY_OP1(__nv_bfloat16, upowf_bf16, powg(x, param))
UNARY_OP(__nv_bfloat16, usign_bf16, sign_(x))
UNARY_OP(__nv_bfloat16, usigmoid_bf16, sigmoid_fwd(x))
#endif
#if __CUDA_ARCH__ >= 890
#define F8E4M3_TO_FLOAT(x) __half2float(__nv_cvt_fp8_to_halfraw(x.__x, __NV_E4M3))
UNARY_OP(__nv_fp8_e4m3, ucopy_f8_e4m3, x)
UNARY_OP(__nv_fp8_e4m3, uneg_fp8_e4m3, __nv_fp8_e4m3(-F8E4M3_TO_FLOAT(x)))
UNARY_OP(__nv_fp8_e4m3, urecip_fp8_e4m3, recipg(x))
UNARY_OP(__nv_fp8_e4m3, uexp_fp8_e4m3, expg(x))
UNARY_OP(__nv_fp8_e4m3, ulog_fp8_e4m3, logg(x))
UNARY_OP(__nv_fp8_e4m3, usin_fp8_e4m3, sing(x))
UNARY_OP(__nv_fp8_e4m3, ucos_fp8_e4m3, cosg(x))
UNARY_OP(__nv_fp8_e4m3, utanh_fp8_e4m3, tanhg(x))
UNARY_OP(__nv_fp8_e4m3, uerf_fp8_e4m3, erfg(x))
UNARY_OP(__nv_fp8_e4m3, uceil_fp8_e4m3, ceilg(x))
UNARY_OP(__nv_fp8_e4m3, ufloor_fp8_e4m3, floorg(x))
UNARY_OP(__nv_fp8_e4m3, uround_fp8_e4m3, roundg(x))
UNARY_OP(__nv_fp8_e4m3, unormcdf_fp8_e4m3, normcdfg(x))
UNARY_OP(__nv_fp8_e4m3, uabs_fp8_e4m3, absg(x))
UNARY_OP(__nv_fp8_e4m3, usqr_fp8_e4m3, __nv_fp8_e4m3(F8E4M3_TO_FLOAT(x)*F8E4M3_TO_FLOAT(x)))
UNARY_OP(__nv_fp8_e4m3, usqrt_fp8_e4m3, sqrtg(x))
UNARY_OP(__nv_fp8_e4m3, ugelu_fp8_e4m3, __nv_fp8_e4m3(gelu_fwd(F8E4M3_TO_FLOAT(x))))
UNARY_OP(__nv_fp8_e4m3, ugelu_erf_fp8_e4m3, __nv_fp8_e4m3(gelu_erf_fwd(F8E4M3_TO_FLOAT(x))))
UNARY_OP(__nv_fp8_e4m3, urelu_fp8_e4m3, __nv_fp8_e4m3(relu_fwd(F8E4M3_TO_FLOAT(x))))
UNARY_OP1(__nv_fp8_e4m3, uelu_fp8_e4m3, __nv_fp8_e4m3(elu_fwd(F8E4M3_TO_FLOAT(x), F8E4M3_TO_FLOAT(param))))
UNARY_OP(__nv_fp8_e4m3, usilu_fp8_e4m3, __nv_fp8_e4m3(silu_fwd(F8E4M3_TO_FLOAT(x))))
UNARY_OP1(__nv_fp8_e4m3, upowf_fp8_e4m3, powg(x, param))
UNARY_OP(__nv_fp8_e4m3, usign_fp8_e4m3, __nv_fp8_e4m3(sign_(F8E4M3_TO_FLOAT(x))))
UNARY_OP(__nv_fp8_e4m3, usigmoid_fp8_e4m3, __nv_fp8_e4m3(sigmoid_fwd(F8E4M3_TO_FLOAT(x))))
#endif
#if __CUDA_ARCH__ >= 530
UNARY_OP(__half, ucopy_f16, x)
UNARY_OP(__half, uneg_f16, -x)
UNARY_OP(__half, urecip_f16, recipg(x))
UNARY_OP(__half, uexp_f16, expg(x))
UNARY_OP(__half, ulog_f16, logg(x))
UNARY_OP(__half, usin_f16, sing(x))
UNARY_OP(__half, ucos_f16, cosg(x))
UNARY_OP(__half, utanh_f16, tanhg(x))
UNARY_OP(__half, uerf_f16, erfg(x))
UNARY_OP(__half, uceil_f16, ceilg(x))
UNARY_OP(__half, ufloor_f16, floorg(x))
UNARY_OP(__half, uround_f16, roundg(x))
UNARY_OP(__half, unormcdf_f16, normcdfg(x))
UNARY_OP(__half, uabs_f16, absg(x))
UNARY_OP(__half, usqr_f16, x*x)
UNARY_OP(__half, usqrt_f16, sqrtg(x))
UNARY_OP(__half, ugelu_f16, gelu_fwd(x))
UNARY_OP(__half, ugelu_erf_f16, gelu_erf_fwd(x))
UNARY_OP(__half, urelu_f16, relu_fwd(x))
UNARY_OP1(__half, uelu_f16, elu_fwd(x, param))
UNARY_OP(__half, usilu_f16, silu_fwd(x))
UNARY_OP1(__half, upowf_f16, powg(x, param))
UNARY_OP(__half, usign_f16, sign_(x))
UNARY_OP(__half, usigmoid_f16, sigmoid_fwd(x))
#endif
UNARY_OP(uint8_t, ucopy_u8, x)
UNARY_OP(uint32_t, ucopy_u32, x)
UNARY_OP(int64_t, ucopy_i64, x)
UNARY_OP(float, ucopy_f32, x)
UNARY_OP(double, ucopy_f64, x)
UNARY_OP(float, uneg_f32, -x)
UNARY_OP(double, uneg_f64, -x)
UNARY_OP(float, urecip_f32, recipg(x))
UNARY_OP(double, urecip_f64, recipg(x))
UNARY_OP(float, uexp_f32, expg(x))
UNARY_OP(double, uexp_f64, expg(x))
UNARY_OP(float, ulog_f32, logg(x))
UNARY_OP(double, ulog_f64, logg(x))
UNARY_OP(float, usin_f32, sing(x))
UNARY_OP(double, usin_f64, sing(x))
UNARY_OP(float, ucos_f32, cosg(x))
UNARY_OP(double, ucos_f64, cosg(x))
UNARY_OP(float, utanh_f32, tanhg(x))
UNARY_OP(double, utanh_f64, tanhg(x))
UNARY_OP(float, uerf_f32, erfg(x))
UNARY_OP(double, uerf_f64, erfg(x))
UNARY_OP(float, uceil_f32, ceilg(x))
UNARY_OP(double, uceil_f64, ceilg(x))
UNARY_OP(float, ufloor_f32, floorg(x))
UNARY_OP(double, ufloor_f64, floorg(x))
UNARY_OP(float, uround_f32, roundg(x))
UNARY_OP(double, uround_f64, roundg(x))
UNARY_OP(float, unormcdf_f32, normcdfg(x))
UNARY_OP(double, unormcdf_f64, normcdfg(x))
UNARY_OP(float, uabs_f32, absg(x))
UNARY_OP(double, uabs_f64, absg(x))
UNARY_OP(float, usqr_f32, x*x)
UNARY_OP(double, usqr_f64, x*x)
UNARY_OP(float, usqrt_f32, sqrtg(x))
UNARY_OP(double, usqrt_f64, sqrtg(x))
UNARY_OP(float, ugelu_f32, gelu_fwd(x))
UNARY_OP(double, ugelu_f64, gelu_fwd(x))
UNARY_OP(float, ugelu_erf_f32, gelu_erf_fwd(x))
UNARY_OP(double, ugelu_erf_f64, gelu_erf_fwd(x))
UNARY_OP(float, urelu_f32, relu_fwd(x))
UNARY_OP(double, urelu_f64, relu_fwd(x))
UNARY_OP1(float, uelu_f32, elu_fwd(x, param))
UNARY_OP1(double, uelu_f64, elu_fwd(x, param))
UNARY_OP(float, usilu_f32, silu_fwd(x))
UNARY_OP(double, usilu_f64, silu_fwd(x))
UNARY_OP1(float, upowf_f32, powg(x, param))
UNARY_OP1(double, upowf_f64, powg(x, param))
UNARY_OP(float, usign_f32, sign_(x))
UNARY_OP(double, usign_f64, sign_(x))
UNARY_OP(float, usigmoid_f32, sigmoid_fwd(x))
UNARY_OP(double, usigmoid_f64, sigmoid_fwd(x))
| candle/candle-kernels/src/unary.cu/0 | {
"file_path": "candle/candle-kernels/src/unary.cu",
"repo_id": "candle",
"token_count": 4820
} | 51 |
#include <metal_stdlib>
#include <metal_limits>
using namespace metal;
METAL_FUNC uint nonzero(uint n) {
return n == 0 ? 1 : n;
}
template<uint N>
constexpr uint nonzero() {
return N == 0 ? 1 : N;
}
template<typename T>
constexpr ushort granularity() {
return nonzero<vec_elements<T>::value>();
}
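// Round up (next_p2) or down (prev_p2) to a power of two via count-leading-zeros.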
METAL_FUNC uint next_p2(uint x) {
return 1 << (32 - clz(x - 1));
}
METAL_FUNC uint prev_p2(uint x) {
return 1 << (31 - clz(x));
}
constant uint MAX_SHARED_MEM = 32767;
template<typename T>
METAL_FUNC uint max_shared_mem(uint n) {
return min(n, prev_p2(MAX_SHARED_MEM / sizeof(T)));
}
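// Map a linear element index to a memory offset using the tensor's dims and
// strides (row-major traversal, last dimension fastest).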
METAL_FUNC uint get_strided_index(
uint idx,
constant const uint &num_dims,
constant const size_t *dims,
constant const size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
struct Divide {
template<typename T>
METAL_FUNC T operator()(T a, T b) { return a / b; }
METAL_FUNC float operator()(float a, float b) { return fast::divide(a, b); }
METAL_FUNC half operator()(half a, half b) { return divide(a, b); }
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::divide(a, b)); }
#endif
};
struct Exp {
template<typename T>
METAL_FUNC T operator()(T a) { return fast::exp(a); }
METAL_FUNC float operator()(float a) { return fast::exp(a); }
METAL_FUNC half operator()(half a) { return exp(a); }
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a) { return static_cast<bfloat>(fast::exp(a)); }
#endif
};
// Keeps track of the index of the value in the reduction operation (argmin, argmax, etc.)
// and the value itself. The index is also used to break ties in the reduction operation.
template <typename T>
struct indexed {
uint i;
T val;
constexpr indexed<T>() threadgroup = default;
};
template <typename T>
struct is_indexed_type {
static constant constexpr bool value = false;
};
template <typename T>
constexpr constant bool is_indexed_t = is_indexed_type<T>::value;
template <typename T>
struct is_indexed_type<indexed<T>> {
static constant constexpr bool value = true;
};
template <typename T>
constexpr constant bool not_indexed_t = !is_indexed_t<T>;
template<typename T>
constexpr METAL_FUNC bool operator<(indexed<T> lhs, indexed<T> rhs) {
return lhs.val < rhs.val || (lhs.val == rhs.val && lhs.i < rhs.i);
}
template<typename T>
constexpr METAL_FUNC bool operator>(indexed<T> lhs, indexed<T> rhs) {
return lhs.val > rhs.val || (lhs.val == rhs.val && lhs.i < rhs.i);
}
template<typename T>
struct _numeric_limits_impl<indexed<T>> {
static constexpr METAL_FUNC indexed<T> lowest() {
return indexed<T>{ 0, numeric_limits<T>::lowest() };
}
static constexpr METAL_FUNC indexed<T> max() {
return indexed<T>{ 0, numeric_limits<T>::max() };
}
};
#if __METAL_VERSION__ >= 220
METAL_FUNC int64_t simd_shuffle_down(int64_t data, uint16_t delta) {
return as_type<int64_t>(simd_shuffle_down(as_type<uint2>(data), delta));
}
#endif
#if defined(__HAVE_BFLOAT__)
// Metal does not have simd_shuffle_down for bfloat16
METAL_FUNC bfloat simd_shuffle_down(bfloat value, ushort delta) {
return as_type<bfloat>(simd_shuffle_down(as_type<ushort>(value), delta));
}
#endif
template <typename T>
METAL_FUNC indexed<T> simd_shuffle_down(indexed<T> iv, ushort delta) {
return indexed<T> {
simd_shuffle_down(iv.i, delta),
simd_shuffle_down(iv.val, delta)
};
}
template<typename T>
struct Sum {
static constexpr METAL_FUNC T init() {
return 0;
}
static METAL_FUNC T simd_op(T a) {
return simd_sum(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) {
return a + b;
}
};
template<typename T>
struct Mul {
static constexpr METAL_FUNC T init() {
return 1;
}
static METAL_FUNC T simd_op(T a) {
return simd_product(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) {
return a * b;
}
};
template<typename T>
struct Min {
static constexpr METAL_FUNC T init() {
return numeric_limits<T>::max();
}
static METAL_FUNC T simd_op(T a) {
return simd_min(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) { return a < b ? a : b; }
METAL_FUNC float operator()(float a, float b) { return fast::min(a, b); }
METAL_FUNC half operator()(half a, half b) { return min(a, b); }
METAL_FUNC uint operator()(uint a, uint b) { return min(a, b); }
METAL_FUNC uchar operator()(uchar a, uchar b) { return min(a, b); }
#if __METAL_VERSION__ >= 220
METAL_FUNC long operator()(long a, long b) { return min(a, b); }
#endif
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::min(static_cast<float>(a), static_cast<float>(b))); }
#endif
};
template<typename T>
struct Max {
static constexpr METAL_FUNC T init() {
return numeric_limits<T>::lowest();
}
static METAL_FUNC T simd_op(T a) {
return simd_max(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) { return a > b ? a : b; }
METAL_FUNC float operator()(float a, float b) { return fast::max(a, b); }
METAL_FUNC half operator()(half a, half b) { return max(a, b); }
METAL_FUNC uint operator()(uint a, uint b) { return max(a, b); }
METAL_FUNC uchar operator()(uchar a, uchar b) { return max(a, b); }
#if __METAL_VERSION__ >= 220
METAL_FUNC long operator()(long a, long b) { return max(a, b); }
#endif
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::max(static_cast<float>(a), static_cast<float>(b))); }
#endif
};
template <typename T>
constexpr constant bool is_simd_t = __is_valid_simdgroup_type<T>::value;
template <typename T, typename _E = void>
struct is_valid_simd_type {
static constant constexpr bool value = false;
};
template <typename T>
constexpr constant bool is_valid_simd_t = is_valid_simd_type<T>::value;
template <typename T>
struct is_valid_simd_type<T, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_valid_simd_type<indexed<T>, typename metal::enable_if_t<is_valid_simd_t<T>>> {
static constant constexpr bool value = true;
};
#if __METAL_VERSION__ >= 220
template <>
struct is_valid_simd_type<int64_t> {
static constant constexpr bool value = true;
};
#endif
#if defined(__HAVE_BFLOAT__)
template <>
struct is_valid_simd_type<bfloat> {
static constant constexpr bool value = true;
};
#endif
template <typename T, typename _E = void>
struct is_simd_op {
static constant constexpr bool value = false;
};
template <typename T>
struct is_simd_op<Sum<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Mul<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Min<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Max<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
// Helper struct for applying operators.
// The overloaded operator() function is used to apply an operator to two values.
template<typename OP, typename T>
struct operation;
// Specialization for scalar values.
template<typename OP, typename T>
struct operation {
OP op;
METAL_FUNC T operator()(T a, T b) {
return op(a, b);
}
};
// Specialization for indexed values.
template<typename OP, typename T>
struct operation<OP, indexed<T>> {
OP op;
METAL_FUNC indexed<T> operator()(indexed<T> a, indexed<T> b) {
return op(a, b);
}
METAL_FUNC indexed<T> operator()(indexed<T> a, T b, uint idx) {
return this->operator()(a, indexed<T>{ idx, b });
}
};
// Load elements from global memory, accumulating them into a per-thread partial value.
// Handles both indexed and non-indexed types by using operate.
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE,
bool STRIDED = false,
typename _E = void
>
struct loader;
// Contiguous
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, false, typename metal::enable_if_t<not_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
uint idx = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = idx; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[i]);
}
return value;
}
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
return this->operator()(value, src_numel, el_per_block, src, offset, tid);
}
};
// Strided
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, true, typename metal::enable_if_t<not_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint idx = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = idx; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[get_strided_index(i, num_dims, dims, strides)]);
}
return value;
}
};
// Indexed contiguous
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, false, typename metal::enable_if_t<is_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = thread_id; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[i], i % dims[num_dims - 1]);
}
return value;
}
};
// Indexed strided
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, true, typename metal::enable_if_t<is_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = thread_id; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[get_strided_index(i, num_dims, dims, strides)], i % dims[num_dims - 1]);
}
return value;
}
};
template<
typename OP,
ushort BLOCKSIZE,
typename T,
typename _E = void
>
struct simdgroup_reducer;
// Specialization for built-in simd operations.
template<typename OP, ushort BLOCKSIZE, typename T>
struct simdgroup_reducer<OP, BLOCKSIZE, T, typename metal::enable_if_t<is_simd_op<OP>::value && is_valid_simd_t<T>>> {
METAL_FUNC T operator()(T value) {
return OP::simd_op(value);
}
};
// Specialization for custom (non-built-in) simd operations.
template<typename OP, ushort BLOCKSIZE, typename T>
struct simdgroup_reducer<OP, BLOCKSIZE, T, typename metal::enable_if_t<!is_simd_op<OP>::value && is_valid_simd_t<T>>> {
operation<OP, T> op;
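    // Butterfly reduction within a simdgroup: each step folds the values from the
    // upper half of the active lanes onto the lower half via simd_shuffle_down.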
METAL_FUNC T operator()(T value) {
if (BLOCKSIZE >= 32) value = op(value, simd_shuffle_down(value, 16));
if (BLOCKSIZE >= 16) value = op(value, simd_shuffle_down(value, 8));
if (BLOCKSIZE >= 8) value = op(value, simd_shuffle_down(value, 4));
if (BLOCKSIZE >= 4) value = op(value, simd_shuffle_down(value, 2));
if (BLOCKSIZE >= 2) value = op(value, simd_shuffle_down(value, 1));
return value;
}
};
template<typename T, typename OP, ushort BLOCKSIZE>
struct block_reducer {
simdgroup_reducer<OP, BLOCKSIZE, T> simd_reduce;
operation<OP, T> operate;
threadgroup T *shared;
block_reducer(threadgroup T shared[BLOCKSIZE]) {
this->shared = shared;
}
METAL_FUNC T operator()(T value, const uint tid) {
if (BLOCKSIZE >= 64) {
// Only store in threadgroup shared memory if needed.
shared[tid] = value;
// Threadgroup barrier is needed to ensure that all threads have written to shared memory
threadgroup_barrier(mem_flags::mem_none);
}
#pragma clang loop unroll(full)
for (ushort s = BLOCKSIZE / 2; s >= 64; s >>= 1) {
if (tid < s) shared[tid] = operate(shared[tid], shared[tid + s]);
threadgroup_barrier(mem_flags::mem_none);
}
if (tid < 32) {
// Last shared memory reduce can be done without tid < s check.
if (BLOCKSIZE >= 64) {
value = operate(shared[tid], shared[tid + 32]);
simdgroup_barrier(mem_flags::mem_none);
}
// Remaining 32 threads can be reduced with simdgroup_reduce.
value = simd_reduce(value);
}
return value;
}
};
// Inspired by "Optimizing Parallel Reduction in CUDA" by Mark Harris
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE,
bool STRIDED = false
>
METAL_FUNC void reduce(
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
device R *dst,
threadgroup R shared[BLOCKSIZE],
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
loader<T, R, OP, BLOCKSIZE, STRIDED> load;
block_reducer<T, OP, BLOCKSIZE> reduce(shared);
    // Calculate the offset for the current thread's threadgroup
const uint offset = dst_id * el_per_block;
// Load with reduction from global memory into shared memory
auto value = load(
OP::init(),
src_numel,
num_dims,
dims,
strides,
el_per_block,
src,
offset,
tid
);
// Complete reduction
R result = reduce(value, tid);
if (tid == 0) dst[dst_id] = result;
}
#define reduce_case(OP, T, R, N) \
case N: { \
threadgroup R shared[N]; \
reduce<T, R, OP<R>, N, STRIDED>( \
src_numel, \
num_dims, \
dims, \
strides, \
el_per_block, \
src, \
dst, \
shared, \
tid, \
dst_id); \
break; \
}
#define ARG(...) __VA_ARGS__
#define impl_reduce_inner(OP, NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
constant size_t *strides = {}; \
const bool STRIDED = false; \
switch (max_shared_mem<T>(block_dim)) { \
reduce_case(OP, ARG(T), ARG(T), 2048); \
reduce_case(OP, ARG(T), ARG(T), 1024); \
reduce_case(OP, ARG(T), ARG(T), 512); \
reduce_case(OP, ARG(T), ARG(T), 256); \
reduce_case(OP, ARG(T), ARG(T), 128); \
reduce_case(OP, ARG(T), ARG(T), 64); \
reduce_case(OP, ARG(T), ARG(T), 32); \
reduce_case(OP, ARG(T), ARG(T), 16); \
reduce_case(OP, ARG(T), ARG(T), 8); \
reduce_case(OP, ARG(T), ARG(T), 4); \
reduce_case(OP, ARG(T), ARG(T), 2); \
reduce_case(OP, ARG(T), ARG(T), 1); \
} \
}
#define impl_reduce_strided(OP, NAME, T) \
kernel void NAME##_strided( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
const bool STRIDED = true; \
switch (max_shared_mem<T>(block_dim)) { \
reduce_case(OP, ARG(T), ARG(T), 2048); \
reduce_case(OP, ARG(T), ARG(T), 1024); \
reduce_case(OP, ARG(T), ARG(T), 512); \
reduce_case(OP, ARG(T), ARG(T), 256); \
reduce_case(OP, ARG(T), ARG(T), 128); \
reduce_case(OP, ARG(T), ARG(T), 64); \
reduce_case(OP, ARG(T), ARG(T), 32); \
reduce_case(OP, ARG(T), ARG(T), 16); \
reduce_case(OP, ARG(T), ARG(T), 8); \
reduce_case(OP, ARG(T), ARG(T), 4); \
reduce_case(OP, ARG(T), ARG(T), 2); \
reduce_case(OP, ARG(T), ARG(T), 1); \
} \
}
#define impl_reduce(OP, NAME, T) \
impl_reduce_inner(OP, NAME, T) \
impl_reduce_strided(OP, NAME, T) \
template<
typename T,
typename ReductionOp,
ushort BLOCKSIZE,
bool STRIDED = false
>
METAL_FUNC void reduce(
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
device uint *dst,
threadgroup indexed<T> shared[BLOCKSIZE],
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
using I = indexed<T>;
loader<T, indexed<T>, ReductionOp, BLOCKSIZE, STRIDED> load;
block_reducer<I, ReductionOp, BLOCKSIZE> reduce(shared);
    // Calculate the offset for the current thread's threadgroup
const uint offset = dst_id * el_per_block;
// Load with reduction from global memory into shared memory
indexed<T> value = load(
ReductionOp::init(),
src_numel,
num_dims,
dims,
strides,
el_per_block,
src,
offset,
tid
);
// Complete reduction
I result = reduce(value, tid);
// Return index of reduce result
if (tid == 0) dst[dst_id] = result.i;
}
#define arg_reduce_case(OP, T, N) \
case N: { \
using I = indexed<T>; \
threadgroup I shared[N]; \
reduce<T, OP<I>, N, STRIDED>( \
src_numel, \
num_dims, \
dims, \
strides, \
el_per_block, \
src, \
dst, \
shared, \
tid, \
dst_id); \
break; \
}
#define impl_arg_reduce_inner(OP, NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant uint &el_per_block, \
device const T *src, \
device uint *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
constant size_t *strides = {}; \
const bool STRIDED = false; \
switch (max_shared_mem<indexed<T>>(block_dim)) { \
arg_reduce_case(OP, ARG(T), 1024); \
arg_reduce_case(OP, ARG(T), 512); \
arg_reduce_case(OP, ARG(T), 256); \
arg_reduce_case(OP, ARG(T), 128); \
arg_reduce_case(OP, ARG(T), 64); \
arg_reduce_case(OP, ARG(T), 32); \
arg_reduce_case(OP, ARG(T), 16); \
arg_reduce_case(OP, ARG(T), 8); \
arg_reduce_case(OP, ARG(T), 4); \
arg_reduce_case(OP, ARG(T), 2); \
arg_reduce_case(OP, ARG(T), 1); \
} \
} \
#define impl_arg_reduce_strided(OP, NAME, T) \
kernel void NAME##_strided( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant uint &el_per_block, \
device const T *src, \
device uint *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
const bool STRIDED = true; \
const bool INDEXED = true; \
switch (max_shared_mem<indexed<T>>(block_dim)) { \
arg_reduce_case(OP, ARG(T), 1024); \
arg_reduce_case(OP, ARG(T), 512); \
arg_reduce_case(OP, ARG(T), 256); \
arg_reduce_case(OP, ARG(T), 128); \
arg_reduce_case(OP, ARG(T), 64); \
arg_reduce_case(OP, ARG(T), 32); \
arg_reduce_case(OP, ARG(T), 16); \
arg_reduce_case(OP, ARG(T), 8); \
arg_reduce_case(OP, ARG(T), 4); \
arg_reduce_case(OP, ARG(T), 2); \
arg_reduce_case(OP, ARG(T), 1); \
} \
}
#define impl_arg_reduce(OP, NAME, T) \
impl_arg_reduce_inner(OP, NAME, T) \
impl_arg_reduce_strided(OP, NAME, T)
// Contains the intermediate results for the online softmax calculation.
// m: max
// d: sum of the exponentials
template <typename T>
struct MD {
T m;
float d;
constexpr MD<T>() = default;
constexpr MD<T>() threadgroup = default;
};
// Enable operations for softmax MD
template<typename OP, typename T>
struct operation<OP, MD<T>> {
OP op;
METAL_FUNC MD<T> operator()(MD<T> a, MD<T> b) {
return op(a, b);
}
METAL_FUNC MD<T> operator()(MD<T> a, T b) {
return this->operator()(a, MD<T>{ b, static_cast<T>(1.0) });
}
};
template <typename T>
METAL_FUNC MD<T> simd_shuffle_down(MD<T> md, ushort delta) {
return MD<T> {
simd_shuffle_down(md.m, delta),
simd_shuffle_down(md.d, delta)
};
}
// Enable simd_shuffle_down for softmax MD
template <typename T>
struct is_valid_simd_type<MD<T>, typename metal::enable_if_t<is_valid_simd_t<T>>> {
static constant constexpr bool value = true;
};
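// Merge rule for two partial softmax states (m, d): keep the larger max and
// rescale the other side's sum of exponentials into the new base,
// d = d_big + d_small * exp(m_small - m_big). Applying this pairwise is what
// keeps the single-pass (online) softmax numerically stable.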
template<typename T>
struct MDReduceOp {
Exp fast_exp;
static constexpr METAL_FUNC MD<T> init() {
return MD<T>{ numeric_limits<T>::lowest(), 0 };
}
METAL_FUNC MD<T> operator()(MD<T> a, MD<T> b) {
bool a_bigger = a.m > b.m;
MD<T> bigger_m = a_bigger ? a : b;
MD<T> smaller_m = a_bigger ? b : a;
MD<T> res;
res.d = bigger_m.d + smaller_m.d * fast_exp(smaller_m.m - bigger_m.m);
res.m = bigger_m.m;
return res;
}
};
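// Second pass of the online softmax: once the block-wide (m, d) state is
// known, every element is written out as exp(x - m) / d, with the division
// hoisted out of the loop as a multiplication by 1/d.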
template<typename T, ushort BLOCKSIZE>
struct finalize_softmax {
Divide fast_divide;
Exp fast_exp;
METAL_FUNC void operator()(
device const T *src,
device T *dst,
threadgroup MD<T> &md_total,
const uint thread_id,
const uint stop_idx
) {
const float d_total_inverse = fast_divide(1.0, md_total.d);
for (uint idx = thread_id; idx < stop_idx; idx += BLOCKSIZE) {
dst[idx] = static_cast<T>(fast_exp(src[idx] - md_total.m) * d_total_inverse);
}
}
};
// Welford-style single-pass (online) softmax implementation.
// Follows the online normalizer calculation for softmax: https://arxiv.org/pdf/1805.02867.pdf
template<typename T, ushort BLOCKSIZE>
METAL_FUNC void softmax(
constant uint &src_numel,
constant uint &el_per_block,
device const T *src,
device T *dst,
threadgroup MD<T> shared[BLOCKSIZE],
threadgroup MD<T> &md_total,
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
using MDReduceOp = MDReduceOp<T>;
loader<T, MD<T>, MDReduceOp, BLOCKSIZE> load;
block_reducer<MD<T>, MDReduceOp, BLOCKSIZE> reduce(shared);
finalize_softmax<T, BLOCKSIZE> softmax_finalize;
    // Calculate the offset for this thread's threadgroup
const uint offset = dst_id * el_per_block;
// Calculate partial result for current thread
MD<T> md_partial = MD<T> { numeric_limits<T>::lowest(), 0 };
md_partial = load(
md_partial,
src_numel,
el_per_block,
src,
offset,
tid
);
// Reduce in shared memory
MD<T> md = reduce(md_partial, tid);
if (tid == 0) md_total = md;
    // md_total lives in threadgroup memory, so thread 0's write needs a
    // threadgroup memory fence to become visible to the whole group.
    threadgroup_barrier(mem_flags::mem_threadgroup);
// Finalize softmax
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
softmax_finalize(src, dst, md_total, thread_id, stop_idx);
}
#define softmax_case(T, N) \
case N: { \
threadgroup MD<T> shared[N]; \
threadgroup MD<T> md_total; \
softmax<T, N>( \
src_numel, \
el_per_block, \
src, \
dst, \
shared, \
md_total, \
tid, \
dst_id); \
break; \
}
#define impl_softmax(NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
switch (max_shared_mem<T>(block_dim)) { \
softmax_case(T, 1024); \
softmax_case(T, 512); \
softmax_case(T, 256); \
softmax_case(T, 128); \
softmax_case(T, 64); \
softmax_case(T, 32); \
softmax_case(T, 16); \
softmax_case(T, 8); \
softmax_case(T, 4); \
softmax_case(T, 2); \
softmax_case(T, 1); \
} \
}
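// RMSNorm: y = x / sqrt(mean(x^2) + eps), optionally scaled element-wise by
// alpha. The mean of squares is accumulated in f32 shared memory and combined
// with a tree reduction over the threadgroup.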
template<typename T>
METAL_FUNC void rmsnorm(
constant size_t & src_numel,
constant size_t & el_to_sum_per_block,
device const T * src,
device T * dst,
device const T * alpha,
constant float & eps,
uint id,
uint tid,
uint dst_id,
uint block_dim,
threadgroup float * shared_memory
) {
size_t start_idx = dst_id * el_to_sum_per_block;
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel);
size_t idx = start_idx + tid;
float tmp = 0;
while (idx < stop_idx) {
tmp = tmp + float(src[idx]) * float(src[idx]);
idx += block_dim;
}
shared_memory[tid] = tmp;
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint s = block_dim / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
/* wait for shared_memory[0] to be filled */
threadgroup_barrier(mem_flags::mem_threadgroup);
float norm = sqrt(shared_memory[0] / float(el_to_sum_per_block) + eps);
float inv_norm = 1.0f / norm;
idx = start_idx + tid;
while (idx < stop_idx) {
float val = float(src[idx]) * inv_norm;
if (alpha != nullptr) {
val *= float(alpha[idx - start_idx]);
}
dst[idx] = T(val);
idx += block_dim;
}
}
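// LayerNorm: y = (x - mean) / sqrt(var + eps) * alpha + beta, with
// var = E[x^2] - E[x]^2. The running sum and sum of squares live in the lower
// and upper block_dim-sized halves of shared_memory respectively.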
template<typename T>
METAL_FUNC void layernorm(
constant size_t & src_numel,
constant size_t & el_to_sum_per_block,
device const T * src,
device T * dst,
device const T * alpha,
device const T * beta,
constant float & eps,
uint id,
uint tid,
uint dst_id,
uint block_dim,
threadgroup float * shared_memory
) {
size_t start_idx = dst_id * el_to_sum_per_block;
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel);
size_t idx = start_idx + tid;
float tmp1 = 0;
float tmp2 = 0;
while (idx < stop_idx) {
tmp1 += float(src[idx]);
tmp2 += float(src[idx]) * float(src[idx]);
idx += block_dim;
}
shared_memory[tid] = tmp1;
shared_memory[tid + block_dim] = tmp2;
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint s = block_dim / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s];
shared_memory[block_dim + tid] = shared_memory[block_dim + tid] + shared_memory[block_dim + tid + s];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
/* wait for shared_memory[0] to be filled */
threadgroup_barrier(mem_flags::mem_threadgroup);
float mean = shared_memory[0] / float(el_to_sum_per_block);
float var = shared_memory[block_dim] / float(el_to_sum_per_block) - mean * mean;
float inv_norm = 1.0f / sqrt(var + eps);
idx = start_idx + tid;
while (idx < stop_idx) {
float val = (float(src[idx]) - mean) * inv_norm;
if (alpha != nullptr) {
val *= float(alpha[idx - start_idx]);
}
if (beta != nullptr) {
val += float(beta[idx - start_idx]);
}
dst[idx] = T(val);
idx += block_dim;
}
}
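// Presumably sized so that layernorm's two block_dim-sized partial sums still
// fit at Metal's 1024-thread threadgroup maximum (2 * 1024 = 2048 floats).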
constant int THREADGROUP_SIZE = 2048;
#define RMSNORM(NAME, T) \
kernel void NAME( \
constant size_t &src_numel, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
device const T *alpha, \
constant float &eps, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
threadgroup float shared_memory[THREADGROUP_SIZE]; \
shared_memory[tid] = 0; \
rmsnorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, eps, id, tid, dst_id, block_dim, shared_memory); \
}
#define LAYERNORM(NAME, T) \
kernel void NAME( \
constant size_t &src_numel, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
device const T *alpha, \
device const T *beta, \
constant float &eps, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
threadgroup float shared_memory[THREADGROUP_SIZE]; \
shared_memory[tid] = 0; \
layernorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, beta, eps, id, tid, dst_id, block_dim, shared_memory); \
}
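// Interleaved RoPE: each consecutive pair (src[2i], src[2i + 1]) is rotated by
// the angle whose cos/sin are looked up at rope_idx:
//   dst0 = x0 * c - x1 * s,  dst1 = x0 * s + x1 * c.
// A non-zero stride_b selects a per-batch slice of the cos/sin tables.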
template<typename T>
METAL_FUNC void ropei(
constant size_t &bh,
constant size_t &td,
constant size_t &stride_b,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint tid
) {
if (2 * tid >= bh * td) {
return;
}
size_t rope_idx = tid % (td / 2);
if (stride_b > 0) {
size_t b_idx = (2 * tid) / stride_b;
rope_idx += b_idx * (td / 2);
}
T c = cos[rope_idx];
T s = sin[rope_idx];
dst[2 * tid] = src[2 * tid] * c - src[2 * tid + 1] * s;
dst[2 * tid + 1] = src[2 * tid] * s + src[2 * tid + 1] * c;
}
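// Non-interleaved ("split half") RoPE: element i in the first half of each
// head dimension is paired with element i + d/2, and both are rotated with the
// cos/sin entry indexed by (time step, feature). The input appears to be laid
// out as bh rows of t * d contiguous elements.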
template<typename T>
METAL_FUNC void rope(
constant size_t &bh,
constant size_t &td,
constant size_t &d,
constant size_t &stride_b,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint idx
) {
if (2 * idx >= bh * td) {
return;
}
size_t i_bh = idx / (td / 2);
size_t i_td = idx - (td / 2) * i_bh;
size_t i_t = i_td / (d / 2);
size_t i_d = i_td - (d / 2) * i_t;
size_t i1 = i_bh * td + i_t * d + i_d;
size_t i2 = i1 + d / 2;
size_t i_cs = i_t * (d / 2) + i_d;
if (stride_b > 0) {
size_t b_idx = (2 * idx) / stride_b;
i_cs += b_idx * (td / 2);
}
T c = cos[i_cs];
T s = sin[i_cs];
dst[i1] = src[i1] * c - src[i2] * s;
dst[i2] = src[i1] * s + src[i2] * c;
}
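// Same split-half rotation as rope(), but for tensors laid out as
// (b, t, h, d): the time index i_t is recovered from the flattened
// batch/time/head index in order to look up the matching cos/sin row.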
template<typename T>
METAL_FUNC void rope_thd(
constant size_t &b,
constant size_t &t,
constant size_t &h,
constant size_t &d,
constant size_t &stride_b,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint idx
) {
if (2 * idx >= b * t * h * d) {
return;
}
const size_t i_bth = idx / (d / 2);
const size_t i_d = idx - (d / 2) * i_bth;
const size_t i_t = (i_bth / h) % t;
const size_t i1 = i_bth * d + i_d;
const size_t i2 = i1 + d / 2;
size_t i_cs = i_t * (d / 2) + i_d;
if (stride_b > 0) {
const size_t b_idx = (2 * idx) / stride_b;
i_cs += b_idx * ((t * d) / 2);
}
T c = cos[i_cs];
T s = sin[i_cs];
dst[i1] = src[i1] * c - src[i2] * s;
dst[i2] = src[i1] * s + src[i2] * c;
}
#define ROPE(FN_NAME, FN_NAME_I, FN_NAME_THD, TYPENAME) \
kernel void FN_NAME_I( \
constant size_t &bh, \
constant size_t &td, \
constant size_t &stride_b, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint tid [[ thread_position_in_grid ]] \
) { \
ropei<TYPENAME>(bh, td, stride_b, src, cos, sin, dst, tid); \
}\
kernel void FN_NAME( \
constant size_t &bh, \
constant size_t &td, \
constant size_t &d, \
constant size_t &stride_b, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint idx [[ thread_position_in_grid ]] \
) { \
rope<TYPENAME>(bh, td, d, stride_b, src, cos, sin, dst, idx); \
}\
kernel void FN_NAME_THD( \
constant size_t &b, \
constant size_t &t, \
constant size_t &h, \
constant size_t &d, \
constant size_t &stride_b, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint idx [[ thread_position_in_grid ]] \
) { \
rope_thd<TYPENAME>(b, t, h, d, stride_b, src, cos, sin, dst, idx); \
}
RMSNORM(rmsnorm_f32, float)
RMSNORM(rmsnorm_f16, half)
LAYERNORM(layernorm_f32, float)
LAYERNORM(layernorm_f16, half)
ROPE(rope_f32, rope_i_f32, rope_thd_f32, float)
ROPE(rope_f16, rope_i_f16, rope_thd_f16, half)
impl_reduce(Sum, fast_sum_f32, float)
impl_reduce(Sum, fast_sum_u32, uint)
impl_reduce(Sum, fast_sum_f16, half)
impl_reduce(Sum, fast_sum_u8, uint8_t)
impl_reduce(Mul, fast_mul_f32, float)
impl_reduce(Mul, fast_mul_u32, uint)
impl_reduce(Mul, fast_mul_f16, half)
impl_reduce(Mul, fast_mul_u8, uint8_t)
impl_reduce(Max, fast_max_f32, float)
impl_reduce(Max, fast_max_u32, uint)
impl_reduce(Max, fast_max_f16, half)
impl_reduce(Max, fast_max_u8, uint8_t)
impl_reduce(Min, fast_min_f32, float)
impl_reduce(Min, fast_min_u32, uint)
impl_reduce(Min, fast_min_f16, half)
impl_reduce(Min, fast_min_u8, uint8_t)
impl_arg_reduce(Min, fast_argmin_f32, float)
impl_arg_reduce(Min, fast_argmin_f16, half)
impl_arg_reduce(Min, fast_argmin_u32, uint)
impl_arg_reduce(Min, fast_argmin_u8, uint8_t)
impl_arg_reduce(Max, fast_argmax_f32, float)
impl_arg_reduce(Max, fast_argmax_f16, half)
impl_arg_reduce(Max, fast_argmax_u32, uint)
impl_arg_reduce(Max, fast_argmax_u8, uint8_t)
impl_softmax(softmax_f32, float)
impl_softmax(softmax_f16, half)
#if __METAL_VERSION__ >= 220
impl_reduce(Sum, fast_sum_i64, int64_t)
impl_reduce(Mul, fast_mul_i64, int64_t)
impl_reduce(Min, fast_min_i64, int64_t)
impl_reduce(Max, fast_max_i64, int64_t)
impl_arg_reduce(Min, fast_argmin_i64, int64_t)
impl_arg_reduce(Max, fast_argmax_i64, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
impl_reduce(Sum, fast_sum_bf16, bfloat)
impl_reduce(Mul, fast_mul_bf16, bfloat)
impl_reduce(Max, fast_max_bf16, bfloat)
impl_reduce(Min, fast_min_bf16, bfloat)
impl_arg_reduce(Min, fast_argmin_bf16, bfloat)
impl_arg_reduce(Max, fast_argmax_bf16, bfloat)
impl_softmax(softmax_bf16, bfloat)
RMSNORM(rmsnorm_bf16, bfloat)
LAYERNORM(layernorm_bf16, bfloat)
ROPE(rope_bf16, rope_i_bf16, rope_thd_bf16, bfloat)
#endif
| candle/candle-metal-kernels/src/reduce.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/reduce.metal",
"repo_id": "candle",
"token_count": 21447
} | 52 |
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle::{DType, Device, Module, Tensor};
use candle_nn::{Conv2d, Conv2dConfig};
use criterion::{black_box, criterion_group, Criterion};
use std::time::Instant;
const B: usize = 1;
const C: usize = 1;
const M: usize = 128;
const K: usize = 128;
const K_SIZE: usize = 3;
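// Benchmark setup: a 1x1x128x128 input convolved with a single 3x3 kernel.
// All tensors are materialized once up front so the timed loop measures the
// forward pass itself; the per-iteration clones are cheap since candle tensors
// are reference-counted handles.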
fn run(input: Tensor, weight: Tensor, bias: Tensor, config: Conv2dConfig) {
Conv2d::new(weight, Some(bias), config)
.forward(&input)
.unwrap();
}
fn run_conv2d_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
    let weight = Tensor::ones((1, 1, K_SIZE, K_SIZE), dtype, device).unwrap();
    // The kernel has a single output channel, so the bias holds one element.
    let bias = Tensor::zeros(1, dtype, device).unwrap();
let input = Tensor::ones((B, C, M, K), dtype, device).unwrap();
let mut group = c.benchmark_group(device.bench_name(name));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(
black_box(input.clone()),
black_box(weight.clone()),
black_box(bias.clone()),
Default::default(),
);
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let device = BenchDeviceHandler::new().unwrap();
for d in device.devices {
run_conv2d_benchmark(c, &d, DType::F32, "conv2d_f32");
run_conv2d_benchmark(c, &d, DType::F16, "conv2d_f16");
}
}
criterion_group!(benches, criterion_benchmark);
| candle/candle-nn/benches/benchmarks/conv.rs/0 | {
"file_path": "candle/candle-nn/benches/benchmarks/conv.rs",
"repo_id": "candle",
"token_count": 808
} | 53 |
//! candle-nn
//!
//! ## Other Crates
//!
//! Candle consists of a number of crates. This crate holds structs and functions
//! for building and training neural nets. You may wish
//! to look at the docs for the other crates, which can be found here:
//!
//! - [candle-core](https://docs.rs/candle-core/). Core data structures and data types.
//! - [candle-nn](https://docs.rs/candle-nn/). Building blocks for neural nets.
//! - [candle-datasets](https://docs.rs/candle-datasets/). Rust access to commonly used datasets like MNIST.
//! - [candle-examples](https://docs.rs/candle-examples/). Examples of Candle in use.
//! - [candle-onnx](https://docs.rs/candle-onnx/). Loading and using ONNX models.
//! - [candle-pyo3](https://docs.rs/candle-pyo3/). Access to Candle from Python.
//! - [candle-transformers](https://docs.rs/candle-transformers/). Candle implementations of many published transformer models.
//!
pub mod activation;
pub mod batch_norm;
pub mod conv;
pub mod embedding;
pub mod encoding;
pub mod func;
pub mod group_norm;
pub mod init;
pub mod kv_cache;
pub mod layer_norm;
pub mod linear;
pub mod loss;
pub mod ops;
pub mod optim;
pub mod rnn;
pub mod rotary_emb;
pub mod sampling;
pub mod sequential;
pub mod var_builder;
pub mod var_map;
pub use activation::{prelu, Activation, PReLU};
pub use batch_norm::{batch_norm, BatchNorm, BatchNormConfig};
pub use conv::{
conv1d, conv1d_no_bias, conv2d, conv2d_no_bias, conv_transpose1d, conv_transpose1d_no_bias,
conv_transpose2d, conv_transpose2d_no_bias, Conv1d, Conv1dConfig, Conv2d, Conv2dConfig,
ConvTranspose1d, ConvTranspose1dConfig, ConvTranspose2d, ConvTranspose2dConfig,
};
pub use embedding::{embedding, Embedding};
pub use func::{func, func_t, Func, FuncT};
pub use group_norm::{group_norm, GroupNorm};
pub use init::Init;
pub use layer_norm::{
layer_norm, layer_norm_no_bias, rms_norm, LayerNorm, LayerNormConfig, RmsNorm,
};
pub use linear::{linear, linear_b, linear_no_bias, Linear};
pub use ops::Dropout;
pub use optim::{AdamW, Optimizer, ParamsAdamW, SGD};
pub use rnn::{gru, lstm, GRUConfig, LSTMConfig, GRU, LSTM, RNN};
pub use sequential::{seq, Sequential};
pub use var_builder::VarBuilder;
pub use var_map::VarMap;
pub use candle::{Module, ModuleT};
| candle/candle-nn/src/lib.rs/0 | {
"file_path": "candle/candle-nn/src/lib.rs",
"repo_id": "candle",
"token_count": 812
} | 54 |
use candle::{Result, Shape, Tensor};
use candle_nn::encoding::one_hot;
#[test]
fn test_i64_one_hot() -> Result<()> {
let device = candle::Device::Cpu;
let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?;
let depth = 4;
let on_value = 1.0;
let off_value = 0.0;
let one_hot = one_hot::<f32>(indices, depth, on_value, off_value)?;
let expected_matrix = [
[[1., 0., 0., 0.], [0., 0., 1., 0.]],
[[0., 1., 0., 0.], [0., 0., 0., 0.]],
];
assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth)));
let matrix = one_hot.to_vec3::<f32>()?;
assert_eq!(matrix, expected_matrix);
Ok(())
}
#[test]
fn test_rank_3_one_hot() -> Result<()> {
let device = candle::Device::Cpu;
let indices = Tensor::new(
vec![
vec![vec![0i64, 1], vec![2, 3]],
vec![vec![3, 1], vec![1, -1]],
],
&device,
)?;
let depth = 4;
let on_value = 1.0;
let off_value = 0.0;
let one_hot = one_hot::<f32>(indices, depth, on_value, off_value)?;
let expected_matrix = Tensor::new(
vec![
vec![
vec![vec![1f32, 0., 0., 0.], vec![0., 1., 0., 0.]],
vec![vec![0., 0., 1., 0.], vec![0., 0., 0., 1.]],
],
vec![
vec![vec![0., 0., 0., 1.], vec![0., 1., 0., 0.]],
vec![vec![0., 1., 0., 0.], vec![0., 0., 0., 0.]],
],
],
&device,
)?;
assert_eq!(one_hot.shape(), expected_matrix.shape());
assert_eq!(one_hot.dims(), expected_matrix.dims());
let matrix = one_hot.get(1)?.to_vec3::<f32>()?;
let expected_matrix = expected_matrix.get(1)?.to_vec3::<f32>()?;
assert_eq!(matrix, expected_matrix);
Ok(())
}
#[test]
fn test_u8_one_cold() -> Result<()> {
let device = candle::Device::Cpu;
let depth = 4;
let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?;
let on_value = 0u8;
let off_value = 1;
// Note that the method does not require the turbofish operator, as the type is inferred from the on_value.
let one_cold = one_hot(indices, depth, on_value, off_value)?;
let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 1]]];
assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth)));
let matrix = one_cold.to_vec3::<u8>()?;
assert_eq!(matrix, expected_matrix);
Ok(())
}
#[test]
fn test_iter() -> Result<()> {
let device = candle::Device::Cpu;
let depth = 4;
let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?;
let matrix = indices.to_vec2::<i64>()?;
let (dim1, dim2) = indices.dims2()?;
let iter = (0..dim1).flat_map(|i| (0..dim2).map(move |j| (i, j)));
let mut v = vec![0; depth * dim1 * dim2];
for (i, j) in iter {
let idx = i * depth * dim2 + j * depth;
v[idx] = matrix[i][j];
}
for (i, row) in matrix.iter().enumerate() {
for (j, &value) in row.iter().enumerate() {
let idx = i * depth * dim2 + j * depth;
assert_eq!(v[idx], value);
}
}
Ok(())
}
| candle/candle-nn/tests/one_hot.rs/0 | {
"file_path": "candle/candle-nn/tests/one_hot.rs",
"repo_id": "candle",
"token_count": 1592
} | 55 |
from typing import Union, Sequence
class Tensor:
"""
    This contains the type hints for the magic methods of the `candle.Tensor` class.
"""
def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Add a scalar to a tensor or two tensors together.
"""
pass
def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Add a scalar to a tensor or two tensors together.
"""
pass
def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Subtract a scalar from a tensor or one tensor from another.
"""
pass
def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Divide a tensor by a scalar or one tensor by another.
"""
pass
def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Multiply a tensor by a scalar or one tensor by another.
"""
pass
def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Multiply a tensor by a scalar or one tensor by another.
"""
pass
def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor":
"""
Return a slice of a tensor.
"""
pass
def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
"""
Compare a tensor with a scalar or one tensor with another.
"""
pass
| candle/candle-pyo3/_additional_typing/__init__.py/0 | {
"file_path": "candle/candle-pyo3/_additional_typing/__init__.py",
"repo_id": "candle",
"token_count": 1174
} | 56 |