Dataset schema: `text` (string, 5 to 631k characters), `id` (string, 14 to 178 characters), `metadata` (dict), `__index_level_0__` (int64, 0 to 647).
# flake8: noqa import gzip import os import datasets import pytest from ..utils import data_dir, train_files class TestTrainFromIterators: @staticmethod def get_tokenizer_trainer(): # START init_tokenizer_trainer from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers tokenizer = Tokenizer(models.Unigram()) tokenizer.normalizer = normalizers.NFKC() tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel() tokenizer.decoder = decoders.ByteLevel() trainer = trainers.UnigramTrainer( vocab_size=20000, initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), special_tokens=["<PAD>", "<BOS>", "<EOS>"], ) # END init_tokenizer_trainer trainer.show_progress = False return tokenizer, trainer @staticmethod def load_dummy_dataset(): # START load_dataset import datasets dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation") # END load_dataset @pytest.fixture(scope="class") def setup_gzip_files(self, train_files): with open(train_files["small"], "rt") as small: for n in range(3): path = f"data/my-file.{n}.gz" with gzip.open(path, "wt") as f: f.write(small.read()) def test_train_basic(self): tokenizer, trainer = self.get_tokenizer_trainer() # START train_basic # First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/ data = [ "Beautiful is better than ugly." "Explicit is better than implicit." "Simple is better than complex." "Complex is better than complicated." "Flat is better than nested." "Sparse is better than dense." "Readability counts." ] tokenizer.train_from_iterator(data, trainer=trainer) # END train_basic def test_datasets(self): tokenizer, trainer = self.get_tokenizer_trainer() # In order to keep tests fast, we only use the first 100 examples os.environ["TOKENIZERS_PARALLELISM"] = "true" dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]") # START def_batch_iterator def batch_iterator(batch_size=1000): # Only keep the text column to avoid decoding the rest of the columns unnecessarily tok_dataset = dataset.select_columns("text") for batch in tok_dataset.iter(batch_size): yield batch["text"] # END def_batch_iterator # START train_datasets tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset)) # END train_datasets def test_gzip(self, setup_gzip_files): tokenizer, trainer = self.get_tokenizer_trainer() # START single_gzip import gzip with gzip.open("data/my-file.0.gz", "rt") as f: tokenizer.train_from_iterator(f, trainer=trainer) # END single_gzip # START multi_gzip files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"] def gzip_iterator(): for path in files: with gzip.open(path, "rt") as f: for line in f: yield line tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer) # END multi_gzip
tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py/0
{ "file_path": "tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py", "repo_id": "tokenizers", "token_count": 1595 }
293
# Input Sequences <tokenizerslangcontent> <python> These types represent all the different kinds of sequences that can be used as input to a Tokenizer. In general, any sequence can be either a string or a list of strings, depending on the operating mode of the tokenizer: `raw text` vs `pre-tokenized`. ## TextInputSequence[[tokenizers.TextInputSequence]] <code>tokenizers.TextInputSequence</code> A `str` that represents an input sequence ## PreTokenizedInputSequence[[tokenizers.PreTokenizedInputSequence]] <code>tokenizers.PreTokenizedInputSequence</code> A pre-tokenized input sequence. Can be one of: - A `List` of `str` - A `Tuple` of `str` alias of `Union[List[str], Tuple[str]]`. ## InputSequence[[tokenizers.InputSequence]] <code>tokenizers.InputSequence</code> Represents all the possible types of input sequences for encoding. Can be: - When `is_pretokenized=False`: [TextInputSequence](#tokenizers.TextInputSequence) - When `is_pretokenized=True`: [PreTokenizedInputSequence](#tokenizers.PreTokenizedInputSequence) alias of `Union[str, List[str], Tuple[str]]`. </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/input-sequences.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/input-sequences.mdx", "repo_id": "tokenizers", "token_count": 402 }
294
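The input-sequences reference above distinguishes raw-text input from pre-tokenized input. Below is a minimal Python sketch of how the two modes are selected through the `is_pretokenized` flag of `encode`; the `tokenizer.json` path is a placeholder, any trained tokenizer file works.

```python
from tokenizers import Tokenizer

# Assumes a trained tokenizer file is available locally (hypothetical path).
tokenizer = Tokenizer.from_file("tokenizer.json")

# TextInputSequence: a plain `str`, used when `is_pretokenized=False` (the default).
raw = tokenizer.encode("Hello, how are you?")

# PreTokenizedInputSequence: a list (or tuple) of `str`, used with `is_pretokenized=True`.
# Each item is treated as an already-split word; only the model runs on top of it.
pre_split = tokenizer.encode(["Hello,", "how", "are", "you?"], is_pretokenized=True)

print(raw.tokens)
print(pre_split.tokens)
```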
import re from sphinx.directives.other import TocTree class TocTreeTags(TocTree): hasPat = re.compile(r"^\s*:(.+):(.+)$") def filter_entries(self, entries): filtered = [] for e in entries: m = self.hasPat.match(e) if m is not None: if self.env.app.tags.has(m.groups()[0]): filtered.append(m.groups()[1]) else: filtered.append(e) return filtered def run(self): self.content = self.filter_entries(self.content) return super().run() def setup(app): app.add_directive("toctree-tags", TocTreeTags) return { "version": "0.1", }
tokenizers/docs/source/_ext/toctree_tags.py/0
{ "file_path": "tokenizers/docs/source/_ext/toctree_tags.py", "repo_id": "tokenizers", "token_count": 345 }
295
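The `toctree-tags` directive above keeps plain toctree entries as-is and includes a `:tag:page` entry only when that tag is active in the Sphinx build. Here is a standalone sketch of the same filtering logic, written outside Sphinx purely for illustration:

```python
import re

HAS_TAG = re.compile(r"^\s*:(.+):(.+)$")

def filter_entries(entries, active_tags):
    """Mirror of TocTreeTags.filter_entries: keep untagged entries as-is,
    keep ':tag:page' entries only when 'tag' is in the active tag set."""
    filtered = []
    for entry in entries:
        match = HAS_TAG.match(entry)
        if match is not None:
            tag, page = match.groups()
            if tag in active_tags:
                filtered.append(page)
        else:
            filtered.append(entry)
    return filtered

# Building with the 'python' tag keeps the Python page and drops the Rust one.
print(filter_entries([":python:quicktour", ":rust:quicktour", "installation"], {"python"}))
# -> ['quicktour', 'installation']
```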
#[macro_use] extern crate criterion; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; use std::time::{Duration, Instant}; use criterion::Criterion; use std::hint::black_box; use tokenizers::processors::template::TemplateProcessing; use tokenizers::{EncodeInput, Encoding, PostProcessor, Tokenizer}; /// Simple TemplateProcessing fn create_processor() -> TemplateProcessing { TemplateProcessing::builder() .try_single("[CLS]:0 $A:0 [SEP]:0") .unwrap() .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1") .unwrap() .special_tokens(vec![("[CLS]", 0), ("[SEP]", 1)]) .build() .unwrap() } pub fn bench_layout(c: &mut Criterion) { let processor = create_processor(); let tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap(); let mut encodeds: Vec<Encoding> = vec![]; for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() { let line: EncodeInput = line.unwrap().into(); let encoded: Encoding = tokenizer.encode(line, false).unwrap(); encodeds.push(encoded); } c.bench_function("TemplateProcessing single encode", |b| { b.iter_custom(|iters| { let mut duration = Duration::new(0, 0); for i in 0..iters as usize { let encoded_index = i % encodeds.len(); let encoded: Encoding = encodeds[encoded_index].clone(); let start = Instant::now(); let _ = black_box(processor.process(encoded, None, false)); duration = duration.checked_add(start.elapsed()).unwrap(); } duration }) }); c.bench_function("TemplateProcessing pair encode", |b| { b.iter_custom(|iters| { let mut duration = Duration::new(0, 0); for i in 0..iters as usize { let encoded_index = i % encodeds.len(); let encoded: Encoding = encodeds[encoded_index].clone(); let encoded_index2 = (i + 1) % encodeds.len(); let pair: Encoding = encodeds[encoded_index2].clone(); let start = Instant::now(); let _ = black_box(processor.process(encoded, Some(pair), false)); duration = duration.checked_add(start.elapsed()).unwrap(); } duration }) }); } criterion_group! { name = layout_benches; config = Criterion::default().sample_size(20); targets = bench_layout } criterion_main!(layout_benches);
tokenizers/tokenizers/benches/layout_benchmark.rs/0
{ "file_path": "tokenizers/tokenizers/benches/layout_benchmark.rs", "repo_id": "tokenizers", "token_count": 1161 }
296
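The benchmark above drives a `TemplateProcessing` post-processor built in Rust. For reference, roughly the same template can be expressed through the Python bindings; the token ids 0 and 1 below mirror the benchmark's placeholder ids, not a real vocabulary:

```python
from tokenizers.processors import TemplateProcessing

# Same single/pair templates as create_processor() in the benchmark above.
processor = TemplateProcessing(
    single="[CLS]:0 $A:0 [SEP]:0",
    pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 0), ("[SEP]", 1)],
)

# It would typically be attached to a tokenizer as `tokenizer.post_processor = processor`.
```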
<div align="center"> <h1><code>create-wasm-app</code></h1> <strong>An <code>npm init</code> template for kick starting a project that uses NPM packages containing Rust-generated WebAssembly and bundles them with Webpack.</strong> <p> <a href="https://travis-ci.org/rustwasm/create-wasm-app"><img src="https://img.shields.io/travis/rustwasm/create-wasm-app.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="#usage">Usage</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This template is designed for depending on NPM packages that contain Rust-generated WebAssembly and using them to create a Website. * Want to create an NPM package with Rust and WebAssembly? [Check out `wasm-pack-template`.](https://github.com/rustwasm/wasm-pack-template) * Want to make a monorepo-style Website without publishing to NPM? Check out [`rust-webpack-template`](https://github.com/rustwasm/rust-webpack-template) and/or [`rust-parcel-template`](https://github.com/rustwasm/rust-parcel-template). ## 🚴 Usage ``` npm init wasm-app ``` ## 🔋 Batteries Included - `.gitignore`: ignores `node_modules` - `LICENSE-APACHE` and `LICENSE-MIT`: most Rust projects are licensed this way, so these are included for you - `README.md`: the file you are reading now! - `index.html`: a bare bones html document that includes the webpack bundle - `index.js`: example js file with a comment showing how to import and use a wasm pkg - `package.json` and `package-lock.json`: - pulls in devDependencies for using webpack: - [`webpack`](https://www.npmjs.com/package/webpack) - [`webpack-cli`](https://www.npmjs.com/package/webpack-cli) - [`webpack-dev-server`](https://www.npmjs.com/package/webpack-dev-server) - defines a `start` script to run `webpack-dev-server` - `webpack.config.js`: configuration file for bundling your js with webpack ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
tokenizers/tokenizers/examples/unstable_wasm/www/README.md/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/README.md", "repo_id": "tokenizers", "token_count": 893 }
297
//! [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) //! model. use crate::models::bpe::BPE; use crate::tokenizer::{Model, Result, Token}; use ahash::AHashMap; use std::collections::HashMap; use std::{ borrow::Cow, fs::File, io::prelude::*, io::{BufRead, BufReader}, path::{Path, PathBuf}, }; mod serialization; mod trainer; pub use trainer::*; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("WordPiece error: Missing [UNK] token from the vocabulary")] MissingUnkToken, } type Vocab = AHashMap<String, u32>; type VocabR = AHashMap<u32, String>; struct Config { files: Option<String>, vocab: Vocab, unk_token: String, continuing_subword_prefix: String, max_input_chars_per_word: usize, } /// A `WordPieceBuilder` can be used to create a `WordPiece` model with a custom configuration. pub struct WordPieceBuilder { config: Config, } impl Default for WordPieceBuilder { fn default() -> Self { Self { config: Config { files: None, vocab: AHashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, }, } } } impl WordPieceBuilder { /// Construct a new `WordPieceBuilder`. pub fn new() -> Self { Self::default() } /// Set the input files. #[must_use] pub fn files(mut self, vocab: String) -> Self { self.config.files = Some(vocab); self } /// Set the vocab (token -> ID) mapping. #[must_use] pub fn vocab<V: Into<AHashMap<String, u32>>>(mut self, vocab: V) -> Self { self.config.vocab = vocab.into(); self } /// The the `UNK` token for the vocab. #[must_use] pub fn unk_token(mut self, unk_token: String) -> Self { self.config.unk_token = unk_token; self } /// Set the prefix for continuing subwords. #[must_use] pub fn continuing_subword_prefix(mut self, continuing_subword_prefix: String) -> Self { self.config.continuing_subword_prefix = continuing_subword_prefix; self } /// Set the maximum number of input characters per word. #[must_use] pub fn max_input_chars_per_word(mut self, max_input_chars_per_word: usize) -> Self { self.config.max_input_chars_per_word = max_input_chars_per_word; self } /// Constructs a `WordPiece` model that uses the `WordPieceBuilder`'s configuration. pub fn build(mut self) -> Result<WordPiece> { if let Some(vocab) = self.config.files { self.config.vocab = WordPiece::read_file(&vocab)?; } let vocab_r = self .config .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); Ok(WordPiece { vocab: self.config.vocab, vocab_r, unk_token: self.config.unk_token, continuing_subword_prefix: self.config.continuing_subword_prefix, max_input_chars_per_word: self.config.max_input_chars_per_word, }) } } /// A /// [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) /// model. 
#[derive(Clone, PartialEq, Eq)] pub struct WordPiece { vocab: Vocab, vocab_r: VocabR, pub unk_token: String, pub continuing_subword_prefix: String, pub max_input_chars_per_word: usize, } impl std::fmt::Debug for WordPiece { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("WordPiece") .field("unk_token", &self.unk_token) .field("continuing_subword_prefix", &self.continuing_subword_prefix) .field("max_input_chars_per_word", &self.max_input_chars_per_word) .field("vocab", &self.vocab.len()) .finish() } } impl Default for WordPiece { fn default() -> Self { Self { vocab: AHashMap::new(), vocab_r: AHashMap::new(), unk_token: String::from("[UNK]"), continuing_subword_prefix: String::from("##"), max_input_chars_per_word: 100, } } } impl WordPiece { /// Get a `WordPieceBuilder`. pub fn builder() -> WordPieceBuilder { WordPieceBuilder::new() } /// Read the given files to extract the vocab pub fn read_file(vocab: &str) -> Result<Vocab> { let file = File::open(vocab)?; let file = BufReader::new(file); let mut vocab = AHashMap::new(); for (index, line) in file.lines().enumerate() { let line = line?; vocab.insert(line.trim_end().to_owned(), index as u32); } Ok(vocab) } pub fn read_bytes(vocab: &[u8]) -> Result<Vocab> { let file = BufReader::new(vocab); let mut vocab = AHashMap::new(); for (index, line) in file.lines().enumerate() { let line = line?; vocab.insert(line.trim_end().to_owned(), index as u32); } Ok(vocab) } pub fn from_bytes<P: AsRef<[u8]>>(bytes: P) -> Result<Self> { let tokenizer = serde_json::from_slice(bytes.as_ref())?; Ok(tokenizer) } /// Initialize a `WordPiece` model from a vocab mapping file. pub fn from_file(vocab: &str) -> WordPieceBuilder { WordPiece::builder().files(vocab.to_owned()) } /// Create a `WordPiece` model from a `BPE` model. 
pub fn from_bpe(bpe: &BPE) -> Self { let mut wp = Self::builder() .vocab(bpe.get_vocab().into_iter().collect::<AHashMap<_, _>>()) .build() .unwrap(); if let Some(unk) = bpe.get_unk_token() { unk.clone_into(&mut wp.unk_token); } if let Some(prefix) = bpe.get_continuing_subword_prefix() { prefix.clone_into(&mut wp.continuing_subword_prefix); } wp } } impl Model for WordPiece { type Trainer = WordPieceTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.vocab.clone().into_iter().collect() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sequence: &str) -> Result<Vec<Token>> { let char_len = sequence.chars().count(); if char_len > self.max_input_chars_per_word { return Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]); } let mut is_bad = false; let mut start = 0; let mut sub_tokens: Vec<Token> = vec![]; while start < sequence.len() { let mut end = sequence.len(); let mut cur_str = None; while start < end { let mut substr: Cow<str> = Cow::Borrowed(&sequence[start..end]); if start > 0 { substr = Cow::Owned(format!("{}{}", self.continuing_subword_prefix, substr)); } if self.vocab.contains_key(substr.as_ref()) { cur_str = Some(Token { id: self.vocab[substr.as_ref()], value: substr.to_string(), offsets: (start, end), }); break; } end -= substr.chars().last().map_or(1, |c| c.len_utf8()); } if cur_str.is_none() { is_bad = true; break; } sub_tokens.push(cur_str.unwrap()); start = end; } if is_bad { Ok(vec![Token { value: self.unk_token.clone(), id: *self .vocab .get(&self.unk_token) .ok_or(Error::MissingUnkToken)?, offsets: (0, sequence.len()), }]) } else { Ok(sub_tokens) } } fn token_to_id(&self, token: &str) -> Option<u32> { self.vocab.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab_r.get(&id).cloned() } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let vocab_file_name = match name { Some(name) => format!("{name}-vocab.txt"), None => "vocab.txt".to_string(), }; // Write vocab.txt let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())] .iter() .collect(); let mut vocab_file = File::create(&vocab_path)?; let mut vocab: Vec<(&String, &u32)> = self.vocab.iter().collect(); vocab.sort_unstable_by_key(|k| *k.1); vocab_file.write_all( &vocab .into_iter() .flat_map(|(token, _)| format!("{token}\n").as_bytes().to_owned()) .collect::<Vec<_>>()[..], )?; Ok(vec![vocab_path]) } fn get_trainer(&self) -> Self::Trainer { WordPieceTrainer::builder().build() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_error_display() { assert!(format!("{}", Error::MissingUnkToken).contains("Missing [UNK] token")); } }
tokenizers/tokenizers/src/models/wordpiece/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordpiece/mod.rs", "repo_id": "tokenizers", "token_count": 4756 }
298
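As a usage sketch for the model above, the Python bindings expose the same `WordPiece` configuration (vocab, `unk_token`, `##` continuation prefix). The toy vocabulary here is purely illustrative; a real one would normally come from a `vocab.txt` file via `WordPiece.from_file`:

```python
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3, "hello": 4}
tokenizer = Tokenizer(WordPiece(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# Greedy longest-match-first segmentation with the "##" continuing-subword prefix.
print(tokenizer.encode("unaffable hello").tokens)
# -> ['un', '##aff', '##able', 'hello']
```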
use crate::normalizer::Range; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result}; use serde::{Deserialize, Serialize}; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct FixedLength { #[serde(default = "default_length")] pub length: usize, } impl FixedLength { pub fn new(length: usize) -> Self { Self { length } } } fn default_length() -> usize { 5 } impl PreTokenizer for FixedLength { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { let text = normalized.get(); if text.is_empty() { return Ok(vec![]); } let mut splits = Vec::new(); let char_positions: Vec<_> = text.char_indices().collect(); for chunk in char_positions.chunks(self.length) { let start = chunk.first().map(|(i, _)| *i).unwrap_or(0); let end = chunk .last() .map(|(i, c)| i + c.len_utf8()) .unwrap_or(text.len()); splits.push( normalized .slice(Range::Normalized(start..end)) .ok_or("Failed to slice normalized text")?, ); } Ok(splits) }) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; #[test] fn basic() { let tests = vec![ ( "Hello world", vec![("Hello", (0, 5)), (" worl", (5, 10)), ("d", (10, 11))], ), ("Short", vec![("Short", (0, 5))]), ("", vec![]), ]; let pretok = FixedLength { length: 5 }; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } #[test] fn custom_length() { let pretok = FixedLength { length: 3 }; let mut pretokenized = PreTokenizedString::from("Hello world"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hel", (0, 3)), ("lo ", (3, 6)), ("wor", (6, 9)), ("ld", (9, 11)), ] ); } #[test] fn utf8_characters() { let pretok = FixedLength { length: 3 }; let mut pretokenized = PreTokenizedString::from("Hello 👋 world"); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hel", (0, 3)), ("lo ", (3, 6)), ("👋 w", (6, 12)), ("orl", (12, 15)), ("d", (15, 16)), ] ); } }
tokenizers/tokenizers/src/pre_tokenizers/fixed_length.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/fixed_length.rs", "repo_id": "tokenizers", "token_count": 2007 }
299
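To make the splitting behaviour above easy to see outside Rust, here is a small standalone Python sketch that mirrors the `FixedLength` pre-tokenizer: fixed-size chunks of characters with byte offsets, matching the cases covered by the tests.

```python
def fixed_length_pretokenize(text: str, length: int = 5):
    """Split `text` into chunks of `length` characters and report (start, end)
    byte offsets, mirroring the FixedLength pre-tokenizer above."""
    encoded = text.encode("utf-8")
    chars, byte_pos = [], 0
    for ch in text:
        chars.append((byte_pos, ch))
        byte_pos += len(ch.encode("utf-8"))
    splits = []
    for i in range(0, len(chars), length):
        chunk = chars[i:i + length]
        start = chunk[0][0]
        end = chunk[-1][0] + len(chunk[-1][1].encode("utf-8"))
        splits.append((encoded[start:end].decode("utf-8"), (start, end)))
    return splits

print(fixed_length_pretokenize("Hello 👋 world", length=3))
# -> [('Hel', (0, 3)), ('lo ', (3, 6)), ('👋 w', (6, 12)), ('orl', (12, 15)), ('d', (15, 16))]
```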
use crate::parallelism::*; use crate::tokenizer::{Offsets, Token}; use crate::utils::padding::PaddingDirection; use crate::utils::truncation::TruncationDirection; use ahash::AHashMap; use serde::{Deserialize, Serialize}; use std::ops::Range; /// Represents the output of a `Tokenizer`. #[derive(Default, PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Encoding { /// IDs produced by the `Tokenizer` ids: Vec<u32>, /// Type of the IDs type_ids: Vec<u32>, /// Tokens associated to each ID tokens: Vec<String>, /// Indice of the word associated to each token/ID words: Vec<Option<u32>>, /// Offsets of the token/ID from the NormalizedString offsets: Vec<Offsets>, /// Mask identifying special tokens special_tokens_mask: Vec<u32>, /// Mask identifying padding tokens for the attention mechanism attention_mask: Vec<u32>, /// A list of overflowing Encoding generated when we got truncated overflowing: Vec<Encoding>, /// Ranges of tokens covered by each sequence. If this is empty we consider /// there is only one sequence in this Encoding, and that it covers the entire range. sequence_ranges: AHashMap<usize, Range<usize>>, } impl Encoding { #[allow(clippy::too_many_arguments)] pub fn new( ids: Vec<u32>, type_ids: Vec<u32>, tokens: Vec<String>, words: Vec<Option<u32>>, offsets: Vec<Offsets>, special_tokens_mask: Vec<u32>, attention_mask: Vec<u32>, overflowing: Vec<Self>, sequence_ranges: AHashMap<usize, Range<usize>>, ) -> Self { Self { ids, type_ids, tokens, words, offsets, special_tokens_mask, attention_mask, overflowing, sequence_ranges, } } pub fn with_capacity(len: usize) -> Self { Self { ids: Vec::with_capacity(len), type_ids: Vec::with_capacity(len), tokens: Vec::with_capacity(len), words: Vec::with_capacity(len), offsets: Vec::with_capacity(len), special_tokens_mask: Vec::with_capacity(len), attention_mask: Vec::with_capacity(len), overflowing: vec![], sequence_ranges: AHashMap::new(), } } pub fn from_tokens(tokens: Vec<Token>, type_id: u32) -> Self { let length = tokens.len(); let (ids, tokens, offsets) = tokens.into_iter().fold( ( Vec::with_capacity(length), Vec::with_capacity(length), Vec::with_capacity(length), ), |(mut ids, mut tokens, mut offsets), t| { ids.push(t.id); tokens.push(t.value); offsets.push(t.offsets); (ids, tokens, offsets) }, ); Self { ids, tokens, offsets, words: vec![None; length], type_ids: vec![type_id; length], attention_mask: vec![1; length], special_tokens_mask: vec![0; length], overflowing: vec![], sequence_ranges: AHashMap::new(), } } /// Whether this Encoding is empty pub fn is_empty(&self) -> bool { self.ids.is_empty() } /// Return the total length of this Encoding pub fn len(&self) -> usize { self.ids.len() } /// Return the number of sequences combined in this Encoding pub fn n_sequences(&self) -> usize { if self.sequence_ranges.is_empty() { 1 } else { self.sequence_ranges.len() } } /// Set the given sequence id for the whole range of tokens contained in this Encoding pub fn set_sequence_id(&mut self, sequence_id: usize) { self.sequence_ranges.insert(sequence_id, 0..self.len()); } pub fn get_tokens(&self) -> &[String] { &self.tokens[..] 
} pub fn get_word_ids(&self) -> &[Option<u32>] { &self.words } pub fn get_word_ids_mut(&mut self) -> &mut [Option<u32>] { &mut self.words } pub fn get_sequence_ids(&self) -> Vec<Option<usize>> { let mut sequences = vec![None; self.len()]; for seq_id in 0..self.n_sequences() { let range = self.sequence_range(seq_id); let seq_len = range.len(); sequences.splice(range, std::iter::repeat_n(Some(seq_id), seq_len)); } sequences } pub fn get_ids(&self) -> &[u32] { &self.ids } pub fn get_type_ids(&self) -> &[u32] { &self.type_ids } pub fn set_type_ids(&mut self, type_ids: Vec<u32>) { self.type_ids = type_ids; } pub fn get_offsets(&self) -> &[Offsets] { &self.offsets } pub fn get_offsets_mut(&mut self) -> &mut [Offsets] { &mut self.offsets } pub fn get_special_tokens_mask(&self) -> &[u32] { &self.special_tokens_mask } pub fn get_attention_mask(&self) -> &[u32] { &self.attention_mask } pub fn get_overflowing(&self) -> &Vec<Encoding> { &self.overflowing } pub fn set_overflowing(&mut self, overflowing: Vec<Encoding>) { self.overflowing = overflowing; } pub fn get_overflowing_mut(&mut self) -> &mut Vec<Encoding> { &mut self.overflowing } pub fn take_overflowing(&mut self) -> Vec<Encoding> { std::mem::take(&mut self.overflowing) } pub(crate) fn process_tokens_with_offsets_mut<F>(&mut self, func: F) where F: FnMut((usize, (&String, &mut Offsets))), { self.tokens .iter() .zip(self.offsets.iter_mut()) .enumerate() .for_each(func) } /// Returns the range to target to retrieve something (word_id, offsets, ..) related to the /// given sequence id fn sequence_range(&self, sequence_id: usize) -> Range<usize> { self.sequence_ranges .get(&sequence_id) .cloned() .unwrap_or(0..self.len()) } /// Returns the index of the sequence containing the given token pub fn token_to_sequence(&self, token: usize) -> Option<usize> { if token > self.len() { None } else if self.sequence_ranges.is_empty() { Some(0) } else { self.sequence_ranges.iter().find_map(|(seq_id, range)| { if range.contains(&token) { Some(*seq_id) } else { None } }) } } /// Get the encoded tokens corresponding to the word at the given index in the input sequence, /// with the form (start_token, end_token + 1) pub fn word_to_tokens(&self, word: u32, sequence_id: usize) -> Option<(usize, usize)> { let (mut start, mut end) = (None, None); let sequence_range = self.sequence_range(sequence_id); self.words .get(sequence_range.clone())? .iter() .enumerate() .take_while(|(_, w)| **w <= Some(word)) .filter(|(_, w)| **w == Some(word)) .for_each(|(i, _)| { if start.is_none() || Some(i) < start { start = Some(i); } if end.is_none() || Some(i) >= end { end = Some(i + 1); } }); if let (Some(start), Some(end)) = (start, end) { Some((sequence_range.start + start, sequence_range.start + end)) } else { None } } /// Get the offsets of the word at the given index in the input sequence. pub fn word_to_chars(&self, word: u32, sequence_id: usize) -> Option<Offsets> { self.word_to_tokens(word, sequence_id) .and_then(|(start, end)| { if end == 0 { None } else { Some((self.offsets[start].0, self.offsets[end - 1].1)) } }) } /// Get the offsets of the token at the given index. pub fn token_to_chars(&self, token: usize) -> Option<(usize, Offsets)> { Some(( self.token_to_sequence(token)?, self.offsets.get(token).copied()?, )) } /// Get the word that contains the token at the given index. pub fn token_to_word(&self, token: usize) -> Option<(usize, u32)> { Some(( self.token_to_sequence(token)?, self.words.get(token).copied().flatten()?, )) } /// Get the token that contains the given char. 
pub fn char_to_token(&self, pos: usize, sequence_id: usize) -> Option<usize> { let sequence_range = self.sequence_range(sequence_id); self.offsets .get(sequence_range.clone())? .iter() .position(|(start, end)| pos >= *start && pos < *end) .map(|pos| sequence_range.start + pos) } /// Get the word that contains the given char. pub fn char_to_word(&self, pos: usize, sequence_id: usize) -> Option<u32> { Some( self.char_to_token(pos, sequence_id) .and_then(|token| self.token_to_word(token))? .1, ) } /// Truncate the current `Encoding`. /// /// Panics if `stride >= max_len` pub fn truncate(&mut self, max_len: usize, stride: usize, direction: TruncationDirection) { let encoding_len = self.ids.len(); if max_len >= encoding_len { return; } if max_len == 0 { let o = std::mem::replace(self, Encoding::with_capacity(0)); self.overflowing.push(o); return; } assert!(stride < max_len, "`stride` must be strictly less than `max_len={}` (note that `max_len` may be shorter than the max length of the original model, as it subtracts the number of special characters", max_len); // When truncating, we lose the `sequence_ranges` information. self.sequence_ranges.clear(); let offset = max_len - stride; let mut end = false; let parts_ranges: Vec<(usize, usize)> = match direction { TruncationDirection::Right => (0..encoding_len) .step_by(offset) .filter_map(|start| { if !end { let stop = std::cmp::min(start + max_len, encoding_len); end = stop == encoding_len; Some((start, stop)) } else { None } }) .collect(), TruncationDirection::Left => (0..encoding_len) .rev() .step_by(offset) .filter_map(|stop| { let stop = stop + 1; let start = stop.saturating_sub(max_len); if start < stop && !end { end = start == 0; Some((start, stop)) } else { None } }) .collect(), }; let mut i = 0; let (start, stop) = parts_ranges[i]; let mut new_encoding = Encoding { ids: self.ids[start..stop].to_vec(), type_ids: self.type_ids[start..stop].to_vec(), tokens: self.tokens[start..stop].to_vec(), words: self.words[start..stop].to_vec(), offsets: self.offsets[start..stop].to_vec(), special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(), attention_mask: self.attention_mask[start..stop].to_vec(), overflowing: vec![], sequence_ranges: AHashMap::new(), }; loop { if i == parts_ranges.len() - 1 { break; } i += 1; let (start, stop) = parts_ranges[i]; new_encoding.overflowing.push(Encoding { ids: self.ids[start..stop].to_vec(), type_ids: self.type_ids[start..stop].to_vec(), tokens: self.tokens[start..stop].to_vec(), words: self.words[start..stop].to_vec(), offsets: self.offsets[start..stop].to_vec(), special_tokens_mask: self.special_tokens_mask[start..stop].to_vec(), attention_mask: self.attention_mask[start..stop].to_vec(), overflowing: vec![], sequence_ranges: AHashMap::new(), }); } *self = new_encoding; } /// Merge all Encodings together pub fn merge<I: IntoIterator<Item = Encoding>>(encodings: I, growing_offsets: bool) -> Self { let mut encoding = Encoding::default(); // TODO this is suboptimal as we're doing this iteratively instead of preallocating // all the encodings sizes all at once and only copying into this preallocated vector // https://github.com/huggingface/tokenizers/pull/1049 // In order to fix, we just need to preallocate all vectors, then copy everything // into it (and deal with overlowings correctly) for sub in encodings { encoding.merge_with(sub, growing_offsets); } encoding } /// Merge ourself with the given `Encoding`. Happens in place. 
pub fn merge_with(&mut self, pair: Encoding, growing_offsets: bool) { // Handle merging the overflowing parts too: Combine them all // In most of the cases, we expect `pair.overflowing.len() == 0` let mut overflowings = vec![]; // 1. All our overflowings with all the others for self_o in &self.overflowing { // 1. The pair itself let mut n_encoding = self_o.clone(); n_encoding.merge_with(pair.clone(), growing_offsets); overflowings.push(n_encoding); // 2. Its overflowings (this should rarely happen...) for other_o in &pair.overflowing { let mut n_encoding = self_o.clone(); n_encoding.merge_with(other_o.clone(), growing_offsets); overflowings.push(n_encoding); } } // 2. Ourself with all the other overflowings (this should rarely happen too...) for other_o in &pair.overflowing { let mut n_encoding = self.clone(); n_encoding.merge_with(other_o.clone(), growing_offsets); overflowings.push(n_encoding); } // Finish by merging ourself with the other encoding let original_self_len = self.len(); // Must be before any modification to self.ids self.sequence_ranges .extend(pair.sequence_ranges.into_iter().map(|(seq_id, range)| { ( seq_id, original_self_len + range.start..original_self_len + range.end, ) })); self.ids.extend(pair.ids); self.type_ids.extend(pair.type_ids); self.tokens.extend(pair.tokens); self.words.extend(pair.words); let starting_offset = if growing_offsets { self.offsets.last().map_or(0, |o| o.1) } else { 0 }; self.offsets.extend( pair.offsets .into_iter() .map(|(start, end)| (start + starting_offset, end + starting_offset)) .collect::<Vec<_>>(), ); self.special_tokens_mask.extend(pair.special_tokens_mask); self.attention_mask.extend(pair.attention_mask); self.overflowing = overflowings; } pub fn pad( &mut self, target_length: usize, pad_id: u32, pad_type_id: u32, pad_token: &str, direction: PaddingDirection, ) { // Dispatch call to all the overflowings first self.overflowing.maybe_par_iter_mut().for_each(|encoding| { encoding.pad(target_length, pad_id, pad_type_id, pad_token, direction) }); // Then check if we should pad ourself if self.ids.len() >= target_length { // We just do nothing if the wanted padding length is smaller than us return; } let pad_length = target_length - self.ids.len(); match direction { PaddingDirection::Left => { self.ids = (0..pad_length) .map(|_| pad_id) .chain(self.ids.drain(..)) .collect(); self.type_ids = (0..pad_length) .map(|_| pad_type_id) .chain(self.type_ids.drain(..)) .collect(); self.tokens = (0..pad_length) .map(|_| pad_token.to_owned()) .chain(self.tokens.drain(..)) .collect(); self.words = (0..pad_length) .map(|_| None) .chain(self.words.drain(..)) .collect(); self.attention_mask = (0..pad_length) .map(|_| 0) .chain(self.attention_mask.drain(..)) .collect(); self.special_tokens_mask = (0..pad_length) .map(|_| 1) .chain(self.special_tokens_mask.drain(..)) .collect(); self.offsets = (0..pad_length) .map(|_| (0, 0)) .chain(self.offsets.drain(..)) .collect(); self.sequence_ranges .iter_mut() .for_each(|(_seq_id, range)| { *range = (range.start + pad_length)..(range.end + pad_length) }); } PaddingDirection::Right => { self.ids.extend((0..pad_length).map(|_| pad_id)); self.type_ids.extend((0..pad_length).map(|_| pad_type_id)); self.tokens .extend((0..pad_length).map(|_| pad_token.to_owned())); self.words.extend((0..pad_length).map(|_| None)); self.attention_mask.extend((0..pad_length).map(|_| 0)); self.special_tokens_mask.extend((0..pad_length).map(|_| 1)); self.offsets.extend((0..pad_length).map(|_| (0, 0))); } } } } impl 
std::iter::FromIterator<Encoding> for Encoding { fn from_iter<I: IntoIterator<Item = Encoding>>(iter: I) -> Self { Self::merge(iter, false) } } impl std::iter::FromIterator<(u32, String, (usize, usize), Option<u32>, u32)> for Encoding { fn from_iter<I: IntoIterator<Item = (u32, String, (usize, usize), Option<u32>, u32)>>( iter: I, ) -> Self { let items = iter.into_iter(); let (lower, upper) = items.size_hint(); let length = upper.unwrap_or(lower); let mut encoding = Self::with_capacity(length); for (id, token, offsets, word, type_id) in items { encoding.ids.push(id); encoding.tokens.push(token); encoding.offsets.push(offsets); encoding.type_ids.push(type_id); encoding.words.push(word); encoding.special_tokens_mask.push(0); encoding.attention_mask.push(1); } encoding } } #[cfg(test)] mod tests { use super::*; use std::iter::FromIterator; #[test] fn merge_encodings() { let mut a = Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello ")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }; let b = Encoding { ids: vec![2], type_ids: vec![1], tokens: vec![String::from("World!")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }; a.merge_with(b, true); assert_eq!( a, Encoding { ids: vec![1, 2], type_ids: vec![0, 1], tokens: vec![String::from("Hello "), String::from("World!")], words: vec![Some(0), Some(0)], offsets: vec![(0, 6), (6, 12)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], ..Default::default() } ); } #[test] fn truncate() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(2, 0, TruncationDirection::Right); assert_eq!( a, Encoding { ids: vec![1, 2], type_ids: vec![0, 0], tokens: vec![String::from("Hello"), String::from("World")], words: vec![Some(0), Some(1)], offsets: vec![(0, 5), (6, 11)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], overflowing: vec![Encoding { ids: vec![3], type_ids: vec![0], tokens: vec![String::from("!")], words: vec![Some(2)], offsets: vec![(11, 12)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_to_empty() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(0, 0, TruncationDirection::Right); assert_eq!( a, Encoding { overflowing: vec![Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], overflowing: vec![], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_overflow_with_stride() { let mut enc = Encoding { ids: vec![1, 2, 3, 4, 5], type_ids: vec![0, 0, 0, 0, 0], tokens: vec![ String::from("42"), String::from("is"), String::from("the"), String::from("answer"), 
String::from("!"), ], words: vec![Some(0), Some(1), Some(2), Some(3), Some(4)], offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13), (13, 14)], special_tokens_mask: vec![0, 0, 0, 0, 0], attention_mask: vec![1, 1, 1, 1, 1], overflowing: vec![], ..Default::default() }; enc.truncate(4, 2, TruncationDirection::Right); assert_eq!( enc, Encoding { ids: vec![1, 2, 3, 4], type_ids: vec![0, 0, 0, 0], tokens: vec![ String::from("42"), String::from("is"), String::from("the"), String::from("answer"), ], words: vec![Some(0), Some(1), Some(2), Some(3)], offsets: vec![(0, 2), (2, 4), (4, 7), (7, 13)], special_tokens_mask: vec![0, 0, 0, 0], attention_mask: vec![1, 1, 1, 1], overflowing: vec![Encoding { ids: vec![3, 4, 5], type_ids: vec![0, 0, 0], tokens: vec![ String::from("the"), String::from("answer"), String::from("!"), ], words: vec![Some(2), Some(3), Some(4)], offsets: vec![(4, 7), (7, 13), (13, 14)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], overflowing: vec![], ..Default::default() }], ..Default::default() } ); } #[test] fn truncate_left() { let mut a = Encoding { ids: vec![1, 2, 3], type_ids: vec![0, 0, 0], tokens: vec![ String::from("Hello"), String::from("World"), String::from("!"), ], words: vec![Some(0), Some(1), Some(2)], offsets: vec![(0, 5), (6, 11), (11, 12)], special_tokens_mask: vec![0, 0, 0], attention_mask: vec![1, 1, 1], ..Default::default() }; a.truncate(2, 0, TruncationDirection::Left); assert_eq!( a, Encoding { ids: vec![2, 3], type_ids: vec![0, 0], tokens: vec![String::from("World"), String::from("!")], words: vec![Some(1), Some(2)], offsets: vec![(6, 11), (11, 12)], special_tokens_mask: vec![0, 0], attention_mask: vec![1, 1], overflowing: vec![Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello")], words: vec![Some(0)], offsets: vec![(0, 5)], special_tokens_mask: vec![0], attention_mask: vec![1], ..Default::default() }], ..Default::default() } ); } #[test] fn mappings() { let encoding = Encoding { ids: vec![0; 11], // Needed for Encoding::len tokens: vec![ // First sequence: "He".into(), "llo".into(), "won".into(), "der".into(), "ful".into(), "friend".into(), "!".into(), // Second sequence: "How".into(), "are".into(), "you".into(), "?".into(), ], offsets: vec![ // First sequence: (0, 2), (2, 5), (7, 10), (10, 13), (13, 16), (17, 23), (23, 24), // Second sequence: (0, 3), (4, 7), (8, 11), (11, 12), ], words: vec![ // First sequence: Some(0), Some(0), Some(1), Some(1), Some(1), Some(2), Some(3), // Second sequence: Some(0), Some(1), Some(2), Some(3), ], sequence_ranges: AHashMap::from_iter(vec![(0, 0..7), (1, 7..11)]), ..Default::default() }; assert_eq!(encoding.word_to_tokens(0, 0), Some((0, 2))); assert_eq!(encoding.word_to_tokens(1, 0), Some((2, 5))); assert_eq!(encoding.word_to_tokens(2, 0), Some((5, 6))); assert_eq!(encoding.word_to_tokens(3, 0), Some((6, 7))); assert_eq!(encoding.word_to_tokens(0, 1), Some((7, 8))); assert_eq!(encoding.word_to_tokens(1, 1), Some((8, 9))); assert_eq!(encoding.word_to_tokens(2, 1), Some((9, 10))); assert_eq!(encoding.word_to_tokens(3, 1), Some((10, 11))); assert_eq!(encoding.word_to_chars(0, 0), Some((0, 5))); assert_eq!(encoding.word_to_chars(1, 0), Some((7, 16))); assert_eq!(encoding.word_to_chars(0, 1), Some((0, 3))); assert_eq!(encoding.word_to_chars(1, 1), Some((4, 7))); assert_eq!(encoding.token_to_chars(0), Some((0, (0, 2)))); assert_eq!(encoding.token_to_chars(1), Some((0, (2, 5)))); assert_eq!(encoding.token_to_chars(7), Some((1, (0, 3)))); assert_eq!(encoding.token_to_chars(9), Some((1, (8, 
11)))); assert_eq!(encoding.token_to_word(1), Some((0, 0))); assert_eq!(encoding.token_to_word(2), Some((0, 1))); assert_eq!(encoding.token_to_word(7), Some((1, 0))); assert_eq!(encoding.token_to_word(9), Some((1, 2))); assert_eq!(encoding.token_to_word(11), None); assert_eq!(encoding.char_to_token(3, 0), Some(1)); assert_eq!(encoding.char_to_token(8, 0), Some(2)); assert_eq!(encoding.char_to_token(16, 0), None); assert_eq!(encoding.char_to_token(23, 0), Some(6)); assert_eq!(encoding.char_to_token(2, 1), Some(7)); assert_eq!(encoding.char_to_token(9, 1), Some(9)); assert_eq!(encoding.char_to_word(3, 0), Some(0)); assert_eq!(encoding.char_to_word(8, 0), Some(1)); assert_eq!(encoding.char_to_word(16, 0), None); assert_eq!(encoding.char_to_word(23, 0), Some(3)); assert_eq!(encoding.char_to_word(2, 1), Some(0)); assert_eq!(encoding.char_to_word(9, 1), Some(2)); } #[test] fn padding() { let mut a = Encoding { ids: vec![1], type_ids: vec![0], tokens: vec![String::from("Hello ")], words: vec![Some(0)], offsets: vec![(0, 6)], special_tokens_mask: vec![0], attention_mask: vec![1], sequence_ranges: AHashMap::from([(0, 0..1)]), ..Default::default() }; let target_length = 2; let pad_id = 99; let pad_type_id = 0; let pad_token = "[PAD]"; a.pad( target_length, pad_id, pad_type_id, pad_token, PaddingDirection::Left, ); assert_eq!(a.sequence_ranges, AHashMap::from([(0, 1..2)])); } }
tokenizers/tokenizers/src/tokenizer/encoding.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/encoding.rs", "repo_id": "tokenizers", "token_count": 17200 }
300
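The `Encoding` struct above backs the encoding object returned by the Python bindings. A small sketch of the mapping helpers it provides; the tiny word-level vocabulary is illustrative only:

```python
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace

vocab = {"[UNK]": 0, "Hello": 1, "world": 2}
tokenizer = Tokenizer(WordLevel(vocab, unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

encoding = tokenizer.encode("Hello world")
print(encoding.ids)                 # token ids
print(encoding.tokens)              # surface forms
print(encoding.offsets)             # (start, end) offsets into the input
print(encoding.word_ids)            # word index for each token
print(encoding.char_to_token(7))    # token covering character 7 -> 1 ("world")
print(encoding.word_to_tokens(1))   # token span for word 1 -> (1, 2)
```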
mod common; use common::*; use tokenizers::tokenizer::AddedToken; #[test] fn add_tokens() { let mut tokenizer = get_empty(); assert_eq!( tokenizer.add_special_tokens(&[ AddedToken::from("<cls>", true), AddedToken::from("<sep>", true) ]), 2 ); assert_eq!(tokenizer.token_to_id("<cls>"), Some(0)); assert_eq!(tokenizer.token_to_id("<sep>"), Some(1)); assert_eq!( tokenizer.add_tokens(&[ AddedToken::from("hello", false), AddedToken::from("world", false) ]), 2 ); assert_eq!(tokenizer.token_to_id("hello"), Some(2)); assert_eq!(tokenizer.token_to_id("world"), Some(3)); } #[test] fn lstrip_tokens() { let mut tokenizer = get_byte_level(true, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).lstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["ĠI", "Ġsaw", "Ġa", " <mask>", "ĠðŁĺ", "º"] ); assert_eq!( output.get_offsets(), &[(0, 1), (1, 5), (5, 7), (7, 14), (14, 19), (15, 19)] ); } #[test] fn rstrip_tokens() { let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["I", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ðŁĺ", "º"] ); // When `add_prefix_space = true` rstrip cannot work as a prefix space is added // to the next token let mut tokenizer = get_byte_level(true, false); tokenizer.add_special_tokens(&[AddedToken::from("<mask>", true).rstrip(true)]); let input = "I saw a <mask> 😺"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!( output.get_tokens(), &["ĠI", "Ġsaw", "Ġa", "Ġ", "<mask> ", "ĠðŁĺ", "º"] ); } #[test] fn single_word_tokens() { // If `single_word = true` it shouldn't split `dancing` let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(true)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġdancing"]); // If `single_word = false` it should split `dancing` let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("ing", true).single_word(false)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġd", "anc", "ing"]); } #[test] fn overlapping_tokens() { let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]); tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]); let input = "I like dancing"; let output = tokenizer.encode(input, false).unwrap(); assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġ", "danc", "ing"]); let mut tokenizer = get_byte_level(false, false); tokenizer.add_special_tokens(&[AddedToken::from("nci", true)]); tokenizer.add_special_tokens(&[AddedToken::from("danc", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ing", true)]); tokenizer.add_special_tokens(&[AddedToken::from("ike", true)]); let output = tokenizer.encode(input, false).unwrap(); // Breaking change but following `transformers` breaking change. // This behavior is deemed not used in practice: // https://github.com/huggingface/transformers/pull/13220 // Order does NOT matter. 
(We could make it work again but the trie // would need to keep insertion order too) // // assert_eq!(output.get_tokens(), &["I", "Ġlike", "Ġda", "nci", "ng"]); assert_eq!(output.get_tokens(), &["I", "Ġl", "ike", "Ġ", "danc", "ing"]); }
tokenizers/tokenizers/tests/added_tokens.rs/0
{ "file_path": "tokenizers/tokenizers/tests/added_tokens.rs", "repo_id": "tokenizers", "token_count": 1770 }
301
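The behaviours tested above (lstrip, rstrip, and single_word on added special tokens) are driven from Python through `AddedToken`. A hedged sketch, assuming an existing tokenizer file (`tokenizer.json` is a placeholder path):

```python
from tokenizers import Tokenizer, AddedToken

tokenizer = Tokenizer.from_file("tokenizer.json")  # any trained tokenizer

tokenizer.add_special_tokens([
    AddedToken("<mask>", lstrip=True),    # absorb the space to the left of <mask>
    AddedToken("ing", single_word=True),  # only match "ing" when it is a whole word
])

print(tokenizer.encode("I saw a <mask>", add_special_tokens=False).tokens)
```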
# Building a React application In this tutorial, we'll be building a simple React application that performs multilingual translation using Transformers.js! The final product will look something like this: ![Demo](https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/react-translator-demo.gif) Useful links: - [Demo site](https://huggingface.co/spaces/Xenova/react-translator) - [Source code](https://github.com/huggingface/transformers.js-examples/tree/main/react-translator) ## Prerequisites - [Node.js](https://nodejs.org/en/) version 18+ - [npm](https://www.npmjs.com/) version 9+ ## Step 1: Initialise the project For this tutorial, we will use [Vite](https://vitejs.dev/) to initialise our project. Vite is a build tool that allows us to quickly set up a React application with minimal configuration. Run the following command in your terminal: ```bash npm create vite@latest react-translator -- --template react ``` If prompted to install `create-vite`, type <kbd>y</kbd> and press <kbd>Enter</kbd>. Next, enter the project directory and install the necessary development dependencies: ```bash cd react-translator npm install ``` To test that our application is working, we can run the following command: ```bash npm run dev ``` Visiting the URL shown in the terminal (e.g., [http://localhost:5173/](http://localhost:5173/)) should show the default "React + Vite" landing page. You can stop the development server by pressing <kbd>Ctrl</kbd> + <kbd>C</kbd> in the terminal. ## Step 2: Install and configure Transformers.js Now we get to the fun part: adding machine learning to our application! First, install Transformers.js from [NPM](https://www.npmjs.com/package/@huggingface/transformers) with the following command: ```bash npm install @huggingface/transformers ``` For this application, we will use the [Xenova/nllb-200-distilled-600M](https://huggingface.co/Xenova/nllb-200-distilled-600M) model, which can perform multilingual translation among 200 languages. Before we start, there are 2 things we need to take note of: 1. ML inference can be quite computationally intensive, so it's better to load and run the models in a separate thread from the main (UI) thread. 2. Since the model is quite large (>1 GB), we don't want to download it until the user clicks the "Translate" button. We can achieve both of these goals by using a [Web Worker](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) and some [React hooks](https://react.dev/reference/react). 1. Create a file called `worker.js` in the `src` directory. This script will do all the heavy lifting for us, including loading and running the translation pipeline. To ensure the model is only loaded once, we will create the `MyTranslationPipeline` class which uses the [singleton pattern](https://en.wikipedia.org/wiki/Singleton_pattern) to lazily create a single instance of the pipeline when `getInstance` is first called, and use this pipeline for all subsequent calls: ```javascript import { pipeline, TextStreamer } from '@huggingface/transformers'; class MyTranslationPipeline { static task = 'translation'; static model = 'Xenova/nllb-200-distilled-600M'; static instance = null; static async getInstance(progress_callback = null) { this.instance ??= pipeline(this.task, this.model, { progress_callback }); return this.instance; } } ``` 2. Modify `App.jsx` in the `src` directory. This file is automatically created when initializing our React project, and will contain some boilerplate code. 
Inside the `App` function, let's create the web worker and store a reference to it using the `useRef` hook: ```jsx // Remember to import the relevant hooks import { useEffect, useRef, useState } from 'react' import './App.css' function App() { // Create a reference to the worker object. const worker = useRef(null); // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted. useEffect(() => { // Create the worker if it does not yet exist. worker.current ??= new Worker(new URL('./worker.js', import.meta.url), { type: 'module' }); // Create a callback function for messages from the worker thread. const onMessageReceived = (e) => { // TODO: Will fill in later }; // Attach the callback function as an event listener. worker.current.addEventListener('message', onMessageReceived); // Define a cleanup function for when the component is unmounted. return () => worker.current.removeEventListener('message', onMessageReceived); }); return ( // TODO: Rest of our app goes here... ) } export default App ``` ## Step 3: Design the user interface <Tip> We recommend starting the development server again with `npm run dev` (if not already running) so that you can see your changes in real-time. </Tip> First, let's define our components. Create a folder called `components` in the `src` directory, and create the following files: 1. `LanguageSelector.jsx`: This component will allow the user to select the input and output languages. Check out the full list of languages [here](https://github.com/huggingface/transformers.js-examples/tree/main/react-translator/src/components/LanguageSelector.jsx). ```jsx const LANGUAGES = { "Acehnese (Arabic script)": "ace_Arab", "Acehnese (Latin script)": "ace_Latn", "Afrikaans": "afr_Latn", ... "Zulu": "zul_Latn", } export default function LanguageSelector({ type, onChange, defaultLanguage }) { return ( <div className='language-selector'> <label>{type}: </label> <select onChange={onChange} defaultValue={defaultLanguage}> {Object.entries(LANGUAGES).map(([key, value]) => { return <option key={key} value={value}>{key}</option> })} </select> </div> ) } ``` 2. `Progress.jsx`: This component will display the progress for downloading each model file. ```jsx export default function Progress({ text, percentage }) { percentage = percentage ?? 0; return ( <div className="progress-container"> <div className='progress-bar' style={{ 'width': `${percentage}%` }}> {text} ({`${percentage.toFixed(2)}%`}) </div> </div> ); } ``` We can now use these components in `App.jsx` by adding these imports to the top of the file: ```jsx import LanguageSelector from './components/LanguageSelector'; import Progress from './components/Progress'; ``` Let's also add some state variables to keep track of a few things in our application, like model loading, languages, input text, and output text. Add the following code to the beginning of the `App` function in `src/App.jsx`: ```jsx function App() { // Model loading const [ready, setReady] = useState(null); const [disabled, setDisabled] = useState(false); const [progressItems, setProgressItems] = useState([]); // Inputs and outputs const [input, setInput] = useState('I love walking my dog.'); const [sourceLanguage, setSourceLanguage] = useState('eng_Latn'); const [targetLanguage, setTargetLanguage] = useState('fra_Latn'); const [output, setOutput] = useState(''); // rest of the code... } ``` Next, we can add our custom components to the main `App` component. 
We will also add two `textarea` elements for input and output text, and a `button` to trigger the translation. Modify the `return` statement to look like this: ```jsx return ( <> <h1>Transformers.js</h1> <h2>ML-powered multilingual translation in React!</h2> <div className='container'> <div className='language-container'> <LanguageSelector type={"Source"} defaultLanguage={"eng_Latn"} onChange={x => setSourceLanguage(x.target.value)} /> <LanguageSelector type={"Target"} defaultLanguage={"fra_Latn"} onChange={x => setTargetLanguage(x.target.value)} /> </div> <div className='textbox-container'> <textarea value={input} rows={3} onChange={e => setInput(e.target.value)}></textarea> <textarea value={output} rows={3} readOnly></textarea> </div> </div> <button disabled={disabled} onClick={translate}>Translate</button> <div className='progress-bars-container'> {ready === false && ( <label>Loading models... (only run once)</label> )} {progressItems.map(data => ( <div key={data.file}> <Progress text={data.file} percentage={data.progress} /> </div> ))} </div> </> ) ``` Don't worry about the `translate` function for now. We will define it in the next section. Finally, we can add some CSS to make our app look a little nicer. Modify the following files in the `src` directory: 1. `index.css`: <details> <summary>View code</summary> ```css :root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color: #213547; background-color: #ffffff; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } body { margin: 0; display: flex; place-items: center; min-width: 320px; min-height: 100vh; } h1 { font-size: 3.2em; line-height: 1; } h1, h2 { margin: 8px; } select { padding: 0.3em; cursor: pointer; } textarea { padding: 0.6em; } button { padding: 0.6em 1.2em; cursor: pointer; font-weight: 500; } button[disabled] { cursor: not-allowed; } select, textarea, button { border-radius: 8px; border: 1px solid transparent; font-size: 1em; font-family: inherit; background-color: #f9f9f9; transition: border-color 0.25s; } select:hover, textarea:hover, button:not([disabled]):hover { border-color: #646cff; } select:focus, select:focus-visible, textarea:focus, textarea:focus-visible, button:focus, button:focus-visible { outline: 4px auto -webkit-focus-ring-color; } ``` </details> 1. `App.css` <details> <summary>View code</summary> ```css #root { max-width: 1280px; margin: 0 auto; padding: 2rem; text-align: center; } .language-container { display: flex; gap: 20px; } .textbox-container { display: flex; justify-content: center; gap: 20px; width: 800px; } .textbox-container>textarea, .language-selector { width: 50%; } .language-selector>select { width: 150px; } .progress-container { position: relative; font-size: 14px; color: white; background-color: #e9ecef; border: solid 1px; border-radius: 8px; text-align: left; overflow: hidden; } .progress-bar { padding: 0 4px; z-index: 0; top: 0; width: 1%; overflow: hidden; background-color: #007bff; white-space: nowrap; } .progress-text { z-index: 2; } .selector-container { display: flex; gap: 20px; } .progress-bars-container { padding: 8px; height: 140px; } .container { margin: 25px; display: flex; flex-direction: column; gap: 10px; } ``` </details> ## Step 4: Connecting everything together Now that we have a basic user interface set up, we can finally connect everything together. 
First, let's define the `translate` function, which will be called when the user clicks the `Translate` button. This sends a message (containing the input text, source language, and target language) to the worker thread for processing. We will also disable the button so the user doesn't click it multiple times. Add the following code just before the `return` statement in the `App` function:

```jsx
const translate = () => {
  setDisabled(true);
  setOutput('');
  worker.current.postMessage({
    text: input,
    src_lang: sourceLanguage,
    tgt_lang: targetLanguage,
  });
}
```

Now, let's add an event listener in `src/worker.js` to listen for messages from the main thread. We will send back messages (e.g., for model loading progress and text streaming) to the main thread with `self.postMessage`.

```javascript
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
  // Retrieve the translation pipeline. When called for the first time,
  // this will load the pipeline and save it for future use.
  const translator = await MyTranslationPipeline.getInstance(x => {
    // We also add a progress callback to the pipeline so that we can
    // track model loading.
    self.postMessage(x);
  });

  // Capture partial output as it streams from the pipeline
  const streamer = new TextStreamer(translator.tokenizer, {
    skip_prompt: true,
    skip_special_tokens: true,
    callback_function: function (text) {
      self.postMessage({
        status: 'update',
        output: text
      });
    }
  });

  // Actually perform the translation
  const output = await translator(event.data.text, {
    tgt_lang: event.data.tgt_lang,
    src_lang: event.data.src_lang,

    // Allows for partial output to be captured
    streamer,
  });

  // Send the output back to the main thread
  self.postMessage({
    status: 'complete',
    output,
  });
});
```

Finally, let's fill in our `onMessageReceived` function in `src/App.jsx`, which will update the application state in response to messages from the worker thread. Add the following code inside the `useEffect` hook we defined earlier:

```jsx
const onMessageReceived = (e) => {
  switch (e.data.status) {
    case 'initiate':
      // Model file start load: add a new progress item to the list.
      setReady(false);
      setProgressItems(prev => [...prev, e.data]);
      break;

    case 'progress':
      // Model file progress: update one of the progress items.
      setProgressItems(
        prev => prev.map(item => {
          if (item.file === e.data.file) {
            return { ...item, progress: e.data.progress }
          }
          return item;
        })
      );
      break;

    case 'done':
      // Model file loaded: remove the progress item from the list.
      setProgressItems(
        prev => prev.filter(item => item.file !== e.data.file)
      );
      break;

    case 'ready':
      // Pipeline ready: the worker is ready to accept messages.
      setReady(true);
      break;

    case 'update':
      // Generation update: update the output text.
      setOutput(o => o + e.data.output);
      break;

    case 'complete':
      // Generation complete: re-enable the "Translate" button
      setDisabled(false);
      break;
  }
};
```

You can now run the application with `npm run dev` and perform multilingual translation directly in your browser!

## (Optional) Step 5: Build and deploy

To build your application, simply run `npm run build`. This will bundle your application and output the static files to the `dist` folder.

For this demo, we will deploy our application as a static [Hugging Face Space](https://huggingface.co/docs/hub/spaces), but you can deploy it anywhere you like! If you haven't already, you can create a free Hugging Face account [here](https://huggingface.co/join).
1. Visit [https://huggingface.co/new-space](https://huggingface.co/new-space) and fill in the form. Remember to select "Static" as the space type.
2. Go to "Files" &rarr; "Add file" &rarr; "Upload files". Drag the contents of the `dist` folder (e.g., the `index.html` file and the `assets/` folder) into the upload box and click "Upload". After they have uploaded, scroll down and click the "Commit changes to main" button.

**That's it!** Your application should now be live at `https://huggingface.co/spaces/<your-username>/<your-space-name>`!
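If the deployed page comes up blank, check the browser console for failed requests to the bundled assets. The default Vite configuration assumes the site is served from the root of the domain, which is the case for static Spaces; if you instead host the contents of `dist` under a subpath elsewhere, you may need to set Vite's `base` option. Below is a minimal sketch of the scaffold's `vite.config.js` with that extra option (only needed in that situation):

```javascript
// vite.config.js (sketch): `base: './'` makes asset URLs relative, which helps when
// the build is served from a subdirectory rather than the domain root.
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

export default defineConfig({
  plugins: [react()],
  base: './',
})
```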
transformers.js/docs/source/tutorials/react.md/0
{ "file_path": "transformers.js/docs/source/tutorials/react.md", "repo_id": "transformers.js", "token_count": 5891 }
302
@font-face { font-family: "bootstrap-icons"; src: url("./bootstrap-icons.woff2") format("woff2"), url("./bootstrap-icons.woff") format("woff"); } .bi::before, [class^="bi-"]::before, [class*=" bi-"]::before { display: inline-block; font-family: bootstrap-icons !important; font-style: normal; font-weight: normal !important; font-variant: normal; text-transform: none; line-height: 1; vertical-align: -.125em; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } .bi-123::before { content: "\f67f"; } .bi-alarm-fill::before { content: "\f101"; } .bi-alarm::before { content: "\f102"; } .bi-align-bottom::before { content: "\f103"; } .bi-align-center::before { content: "\f104"; } .bi-align-end::before { content: "\f105"; } .bi-align-middle::before { content: "\f106"; } .bi-align-start::before { content: "\f107"; } .bi-align-top::before { content: "\f108"; } .bi-alt::before { content: "\f109"; } .bi-app-indicator::before { content: "\f10a"; } .bi-app::before { content: "\f10b"; } .bi-archive-fill::before { content: "\f10c"; } .bi-archive::before { content: "\f10d"; } .bi-arrow-90deg-down::before { content: "\f10e"; } .bi-arrow-90deg-left::before { content: "\f10f"; } .bi-arrow-90deg-right::before { content: "\f110"; } .bi-arrow-90deg-up::before { content: "\f111"; } .bi-arrow-bar-down::before { content: "\f112"; } .bi-arrow-bar-left::before { content: "\f113"; } .bi-arrow-bar-right::before { content: "\f114"; } .bi-arrow-bar-up::before { content: "\f115"; } .bi-arrow-clockwise::before { content: "\f116"; } .bi-arrow-counterclockwise::before { content: "\f117"; } .bi-arrow-down-circle-fill::before { content: "\f118"; } .bi-arrow-down-circle::before { content: "\f119"; } .bi-arrow-down-left-circle-fill::before { content: "\f11a"; } .bi-arrow-down-left-circle::before { content: "\f11b"; } .bi-arrow-down-left-square-fill::before { content: "\f11c"; } .bi-arrow-down-left-square::before { content: "\f11d"; } .bi-arrow-down-left::before { content: "\f11e"; } .bi-arrow-down-right-circle-fill::before { content: "\f11f"; } .bi-arrow-down-right-circle::before { content: "\f120"; } .bi-arrow-down-right-square-fill::before { content: "\f121"; } .bi-arrow-down-right-square::before { content: "\f122"; } .bi-arrow-down-right::before { content: "\f123"; } .bi-arrow-down-short::before { content: "\f124"; } .bi-arrow-down-square-fill::before { content: "\f125"; } .bi-arrow-down-square::before { content: "\f126"; } .bi-arrow-down-up::before { content: "\f127"; } .bi-arrow-down::before { content: "\f128"; } .bi-arrow-left-circle-fill::before { content: "\f129"; } .bi-arrow-left-circle::before { content: "\f12a"; } .bi-arrow-left-right::before { content: "\f12b"; } .bi-arrow-left-short::before { content: "\f12c"; } .bi-arrow-left-square-fill::before { content: "\f12d"; } .bi-arrow-left-square::before { content: "\f12e"; } .bi-arrow-left::before { content: "\f12f"; } .bi-arrow-repeat::before { content: "\f130"; } .bi-arrow-return-left::before { content: "\f131"; } .bi-arrow-return-right::before { content: "\f132"; } .bi-arrow-right-circle-fill::before { content: "\f133"; } .bi-arrow-right-circle::before { content: "\f134"; } .bi-arrow-right-short::before { content: "\f135"; } .bi-arrow-right-square-fill::before { content: "\f136"; } .bi-arrow-right-square::before { content: "\f137"; } .bi-arrow-right::before { content: "\f138"; } .bi-arrow-up-circle-fill::before { content: "\f139"; } .bi-arrow-up-circle::before { content: "\f13a"; } .bi-arrow-up-left-circle-fill::before { content: "\f13b"; } 
.bi-arrow-up-left-circle::before { content: "\f13c"; } .bi-arrow-up-left-square-fill::before { content: "\f13d"; } .bi-arrow-up-left-square::before { content: "\f13e"; } .bi-arrow-up-left::before { content: "\f13f"; } .bi-arrow-up-right-circle-fill::before { content: "\f140"; } .bi-arrow-up-right-circle::before { content: "\f141"; } .bi-arrow-up-right-square-fill::before { content: "\f142"; } .bi-arrow-up-right-square::before { content: "\f143"; } .bi-arrow-up-right::before { content: "\f144"; } .bi-arrow-up-short::before { content: "\f145"; } .bi-arrow-up-square-fill::before { content: "\f146"; } .bi-arrow-up-square::before { content: "\f147"; } .bi-arrow-up::before { content: "\f148"; } .bi-arrows-angle-contract::before { content: "\f149"; } .bi-arrows-angle-expand::before { content: "\f14a"; } .bi-arrows-collapse::before { content: "\f14b"; } .bi-arrows-expand::before { content: "\f14c"; } .bi-arrows-fullscreen::before { content: "\f14d"; } .bi-arrows-move::before { content: "\f14e"; } .bi-aspect-ratio-fill::before { content: "\f14f"; } .bi-aspect-ratio::before { content: "\f150"; } .bi-asterisk::before { content: "\f151"; } .bi-at::before { content: "\f152"; } .bi-award-fill::before { content: "\f153"; } .bi-award::before { content: "\f154"; } .bi-back::before { content: "\f155"; } .bi-backspace-fill::before { content: "\f156"; } .bi-backspace-reverse-fill::before { content: "\f157"; } .bi-backspace-reverse::before { content: "\f158"; } .bi-backspace::before { content: "\f159"; } .bi-badge-3d-fill::before { content: "\f15a"; } .bi-badge-3d::before { content: "\f15b"; } .bi-badge-4k-fill::before { content: "\f15c"; } .bi-badge-4k::before { content: "\f15d"; } .bi-badge-8k-fill::before { content: "\f15e"; } .bi-badge-8k::before { content: "\f15f"; } .bi-badge-ad-fill::before { content: "\f160"; } .bi-badge-ad::before { content: "\f161"; } .bi-badge-ar-fill::before { content: "\f162"; } .bi-badge-ar::before { content: "\f163"; } .bi-badge-cc-fill::before { content: "\f164"; } .bi-badge-cc::before { content: "\f165"; } .bi-badge-hd-fill::before { content: "\f166"; } .bi-badge-hd::before { content: "\f167"; } .bi-badge-tm-fill::before { content: "\f168"; } .bi-badge-tm::before { content: "\f169"; } .bi-badge-vo-fill::before { content: "\f16a"; } .bi-badge-vo::before { content: "\f16b"; } .bi-badge-vr-fill::before { content: "\f16c"; } .bi-badge-vr::before { content: "\f16d"; } .bi-badge-wc-fill::before { content: "\f16e"; } .bi-badge-wc::before { content: "\f16f"; } .bi-bag-check-fill::before { content: "\f170"; } .bi-bag-check::before { content: "\f171"; } .bi-bag-dash-fill::before { content: "\f172"; } .bi-bag-dash::before { content: "\f173"; } .bi-bag-fill::before { content: "\f174"; } .bi-bag-plus-fill::before { content: "\f175"; } .bi-bag-plus::before { content: "\f176"; } .bi-bag-x-fill::before { content: "\f177"; } .bi-bag-x::before { content: "\f178"; } .bi-bag::before { content: "\f179"; } .bi-bar-chart-fill::before { content: "\f17a"; } .bi-bar-chart-line-fill::before { content: "\f17b"; } .bi-bar-chart-line::before { content: "\f17c"; } .bi-bar-chart-steps::before { content: "\f17d"; } .bi-bar-chart::before { content: "\f17e"; } .bi-basket-fill::before { content: "\f17f"; } .bi-basket::before { content: "\f180"; } .bi-basket2-fill::before { content: "\f181"; } .bi-basket2::before { content: "\f182"; } .bi-basket3-fill::before { content: "\f183"; } .bi-basket3::before { content: "\f184"; } .bi-battery-charging::before { content: "\f185"; } .bi-battery-full::before { content: 
"\f186"; } .bi-battery-half::before { content: "\f187"; } .bi-battery::before { content: "\f188"; } .bi-bell-fill::before { content: "\f189"; } .bi-bell::before { content: "\f18a"; } .bi-bezier::before { content: "\f18b"; } .bi-bezier2::before { content: "\f18c"; } .bi-bicycle::before { content: "\f18d"; } .bi-binoculars-fill::before { content: "\f18e"; } .bi-binoculars::before { content: "\f18f"; } .bi-blockquote-left::before { content: "\f190"; } .bi-blockquote-right::before { content: "\f191"; } .bi-book-fill::before { content: "\f192"; } .bi-book-half::before { content: "\f193"; } .bi-book::before { content: "\f194"; } .bi-bookmark-check-fill::before { content: "\f195"; } .bi-bookmark-check::before { content: "\f196"; } .bi-bookmark-dash-fill::before { content: "\f197"; } .bi-bookmark-dash::before { content: "\f198"; } .bi-bookmark-fill::before { content: "\f199"; } .bi-bookmark-heart-fill::before { content: "\f19a"; } .bi-bookmark-heart::before { content: "\f19b"; } .bi-bookmark-plus-fill::before { content: "\f19c"; } .bi-bookmark-plus::before { content: "\f19d"; } .bi-bookmark-star-fill::before { content: "\f19e"; } .bi-bookmark-star::before { content: "\f19f"; } .bi-bookmark-x-fill::before { content: "\f1a0"; } .bi-bookmark-x::before { content: "\f1a1"; } .bi-bookmark::before { content: "\f1a2"; } .bi-bookmarks-fill::before { content: "\f1a3"; } .bi-bookmarks::before { content: "\f1a4"; } .bi-bookshelf::before { content: "\f1a5"; } .bi-bootstrap-fill::before { content: "\f1a6"; } .bi-bootstrap-reboot::before { content: "\f1a7"; } .bi-bootstrap::before { content: "\f1a8"; } .bi-border-all::before { content: "\f1a9"; } .bi-border-bottom::before { content: "\f1aa"; } .bi-border-center::before { content: "\f1ab"; } .bi-border-inner::before { content: "\f1ac"; } .bi-border-left::before { content: "\f1ad"; } .bi-border-middle::before { content: "\f1ae"; } .bi-border-outer::before { content: "\f1af"; } .bi-border-right::before { content: "\f1b0"; } .bi-border-style::before { content: "\f1b1"; } .bi-border-top::before { content: "\f1b2"; } .bi-border-width::before { content: "\f1b3"; } .bi-border::before { content: "\f1b4"; } .bi-bounding-box-circles::before { content: "\f1b5"; } .bi-bounding-box::before { content: "\f1b6"; } .bi-box-arrow-down-left::before { content: "\f1b7"; } .bi-box-arrow-down-right::before { content: "\f1b8"; } .bi-box-arrow-down::before { content: "\f1b9"; } .bi-box-arrow-in-down-left::before { content: "\f1ba"; } .bi-box-arrow-in-down-right::before { content: "\f1bb"; } .bi-box-arrow-in-down::before { content: "\f1bc"; } .bi-box-arrow-in-left::before { content: "\f1bd"; } .bi-box-arrow-in-right::before { content: "\f1be"; } .bi-box-arrow-in-up-left::before { content: "\f1bf"; } .bi-box-arrow-in-up-right::before { content: "\f1c0"; } .bi-box-arrow-in-up::before { content: "\f1c1"; } .bi-box-arrow-left::before { content: "\f1c2"; } .bi-box-arrow-right::before { content: "\f1c3"; } .bi-box-arrow-up-left::before { content: "\f1c4"; } .bi-box-arrow-up-right::before { content: "\f1c5"; } .bi-box-arrow-up::before { content: "\f1c6"; } .bi-box-seam::before { content: "\f1c7"; } .bi-box::before { content: "\f1c8"; } .bi-braces::before { content: "\f1c9"; } .bi-bricks::before { content: "\f1ca"; } .bi-briefcase-fill::before { content: "\f1cb"; } .bi-briefcase::before { content: "\f1cc"; } .bi-brightness-alt-high-fill::before { content: "\f1cd"; } .bi-brightness-alt-high::before { content: "\f1ce"; } .bi-brightness-alt-low-fill::before { content: "\f1cf"; } 
.bi-brightness-alt-low::before { content: "\f1d0"; } .bi-brightness-high-fill::before { content: "\f1d1"; } .bi-brightness-high::before { content: "\f1d2"; } .bi-brightness-low-fill::before { content: "\f1d3"; } .bi-brightness-low::before { content: "\f1d4"; } .bi-broadcast-pin::before { content: "\f1d5"; } .bi-broadcast::before { content: "\f1d6"; } .bi-brush-fill::before { content: "\f1d7"; } .bi-brush::before { content: "\f1d8"; } .bi-bucket-fill::before { content: "\f1d9"; } .bi-bucket::before { content: "\f1da"; } .bi-bug-fill::before { content: "\f1db"; } .bi-bug::before { content: "\f1dc"; } .bi-building::before { content: "\f1dd"; } .bi-bullseye::before { content: "\f1de"; } .bi-calculator-fill::before { content: "\f1df"; } .bi-calculator::before { content: "\f1e0"; } .bi-calendar-check-fill::before { content: "\f1e1"; } .bi-calendar-check::before { content: "\f1e2"; } .bi-calendar-date-fill::before { content: "\f1e3"; } .bi-calendar-date::before { content: "\f1e4"; } .bi-calendar-day-fill::before { content: "\f1e5"; } .bi-calendar-day::before { content: "\f1e6"; } .bi-calendar-event-fill::before { content: "\f1e7"; } .bi-calendar-event::before { content: "\f1e8"; } .bi-calendar-fill::before { content: "\f1e9"; } .bi-calendar-minus-fill::before { content: "\f1ea"; } .bi-calendar-minus::before { content: "\f1eb"; } .bi-calendar-month-fill::before { content: "\f1ec"; } .bi-calendar-month::before { content: "\f1ed"; } .bi-calendar-plus-fill::before { content: "\f1ee"; } .bi-calendar-plus::before { content: "\f1ef"; } .bi-calendar-range-fill::before { content: "\f1f0"; } .bi-calendar-range::before { content: "\f1f1"; } .bi-calendar-week-fill::before { content: "\f1f2"; } .bi-calendar-week::before { content: "\f1f3"; } .bi-calendar-x-fill::before { content: "\f1f4"; } .bi-calendar-x::before { content: "\f1f5"; } .bi-calendar::before { content: "\f1f6"; } .bi-calendar2-check-fill::before { content: "\f1f7"; } .bi-calendar2-check::before { content: "\f1f8"; } .bi-calendar2-date-fill::before { content: "\f1f9"; } .bi-calendar2-date::before { content: "\f1fa"; } .bi-calendar2-day-fill::before { content: "\f1fb"; } .bi-calendar2-day::before { content: "\f1fc"; } .bi-calendar2-event-fill::before { content: "\f1fd"; } .bi-calendar2-event::before { content: "\f1fe"; } .bi-calendar2-fill::before { content: "\f1ff"; } .bi-calendar2-minus-fill::before { content: "\f200"; } .bi-calendar2-minus::before { content: "\f201"; } .bi-calendar2-month-fill::before { content: "\f202"; } .bi-calendar2-month::before { content: "\f203"; } .bi-calendar2-plus-fill::before { content: "\f204"; } .bi-calendar2-plus::before { content: "\f205"; } .bi-calendar2-range-fill::before { content: "\f206"; } .bi-calendar2-range::before { content: "\f207"; } .bi-calendar2-week-fill::before { content: "\f208"; } .bi-calendar2-week::before { content: "\f209"; } .bi-calendar2-x-fill::before { content: "\f20a"; } .bi-calendar2-x::before { content: "\f20b"; } .bi-calendar2::before { content: "\f20c"; } .bi-calendar3-event-fill::before { content: "\f20d"; } .bi-calendar3-event::before { content: "\f20e"; } .bi-calendar3-fill::before { content: "\f20f"; } .bi-calendar3-range-fill::before { content: "\f210"; } .bi-calendar3-range::before { content: "\f211"; } .bi-calendar3-week-fill::before { content: "\f212"; } .bi-calendar3-week::before { content: "\f213"; } .bi-calendar3::before { content: "\f214"; } .bi-calendar4-event::before { content: "\f215"; } .bi-calendar4-range::before { content: "\f216"; } .bi-calendar4-week::before { 
content: "\f217"; } .bi-calendar4::before { content: "\f218"; } .bi-camera-fill::before { content: "\f219"; } .bi-camera-reels-fill::before { content: "\f21a"; } .bi-camera-reels::before { content: "\f21b"; } .bi-camera-video-fill::before { content: "\f21c"; } .bi-camera-video-off-fill::before { content: "\f21d"; } .bi-camera-video-off::before { content: "\f21e"; } .bi-camera-video::before { content: "\f21f"; } .bi-camera::before { content: "\f220"; } .bi-camera2::before { content: "\f221"; } .bi-capslock-fill::before { content: "\f222"; } .bi-capslock::before { content: "\f223"; } .bi-card-checklist::before { content: "\f224"; } .bi-card-heading::before { content: "\f225"; } .bi-card-image::before { content: "\f226"; } .bi-card-list::before { content: "\f227"; } .bi-card-text::before { content: "\f228"; } .bi-caret-down-fill::before { content: "\f229"; } .bi-caret-down-square-fill::before { content: "\f22a"; } .bi-caret-down-square::before { content: "\f22b"; } .bi-caret-down::before { content: "\f22c"; } .bi-caret-left-fill::before { content: "\f22d"; } .bi-caret-left-square-fill::before { content: "\f22e"; } .bi-caret-left-square::before { content: "\f22f"; } .bi-caret-left::before { content: "\f230"; } .bi-caret-right-fill::before { content: "\f231"; } .bi-caret-right-square-fill::before { content: "\f232"; } .bi-caret-right-square::before { content: "\f233"; } .bi-caret-right::before { content: "\f234"; } .bi-caret-up-fill::before { content: "\f235"; } .bi-caret-up-square-fill::before { content: "\f236"; } .bi-caret-up-square::before { content: "\f237"; } .bi-caret-up::before { content: "\f238"; } .bi-cart-check-fill::before { content: "\f239"; } .bi-cart-check::before { content: "\f23a"; } .bi-cart-dash-fill::before { content: "\f23b"; } .bi-cart-dash::before { content: "\f23c"; } .bi-cart-fill::before { content: "\f23d"; } .bi-cart-plus-fill::before { content: "\f23e"; } .bi-cart-plus::before { content: "\f23f"; } .bi-cart-x-fill::before { content: "\f240"; } .bi-cart-x::before { content: "\f241"; } .bi-cart::before { content: "\f242"; } .bi-cart2::before { content: "\f243"; } .bi-cart3::before { content: "\f244"; } .bi-cart4::before { content: "\f245"; } .bi-cash-stack::before { content: "\f246"; } .bi-cash::before { content: "\f247"; } .bi-cast::before { content: "\f248"; } .bi-chat-dots-fill::before { content: "\f249"; } .bi-chat-dots::before { content: "\f24a"; } .bi-chat-fill::before { content: "\f24b"; } .bi-chat-left-dots-fill::before { content: "\f24c"; } .bi-chat-left-dots::before { content: "\f24d"; } .bi-chat-left-fill::before { content: "\f24e"; } .bi-chat-left-quote-fill::before { content: "\f24f"; } .bi-chat-left-quote::before { content: "\f250"; } .bi-chat-left-text-fill::before { content: "\f251"; } .bi-chat-left-text::before { content: "\f252"; } .bi-chat-left::before { content: "\f253"; } .bi-chat-quote-fill::before { content: "\f254"; } .bi-chat-quote::before { content: "\f255"; } .bi-chat-right-dots-fill::before { content: "\f256"; } .bi-chat-right-dots::before { content: "\f257"; } .bi-chat-right-fill::before { content: "\f258"; } .bi-chat-right-quote-fill::before { content: "\f259"; } .bi-chat-right-quote::before { content: "\f25a"; } .bi-chat-right-text-fill::before { content: "\f25b"; } .bi-chat-right-text::before { content: "\f25c"; } .bi-chat-right::before { content: "\f25d"; } .bi-chat-square-dots-fill::before { content: "\f25e"; } .bi-chat-square-dots::before { content: "\f25f"; } .bi-chat-square-fill::before { content: "\f260"; } 
.bi-chat-square-quote-fill::before { content: "\f261"; } .bi-chat-square-quote::before { content: "\f262"; } .bi-chat-square-text-fill::before { content: "\f263"; } .bi-chat-square-text::before { content: "\f264"; } .bi-chat-square::before { content: "\f265"; } .bi-chat-text-fill::before { content: "\f266"; } .bi-chat-text::before { content: "\f267"; } .bi-chat::before { content: "\f268"; } .bi-check-all::before { content: "\f269"; } .bi-check-circle-fill::before { content: "\f26a"; } .bi-check-circle::before { content: "\f26b"; } .bi-check-square-fill::before { content: "\f26c"; } .bi-check-square::before { content: "\f26d"; } .bi-check::before { content: "\f26e"; } .bi-check2-all::before { content: "\f26f"; } .bi-check2-circle::before { content: "\f270"; } .bi-check2-square::before { content: "\f271"; } .bi-check2::before { content: "\f272"; } .bi-chevron-bar-contract::before { content: "\f273"; } .bi-chevron-bar-down::before { content: "\f274"; } .bi-chevron-bar-expand::before { content: "\f275"; } .bi-chevron-bar-left::before { content: "\f276"; } .bi-chevron-bar-right::before { content: "\f277"; } .bi-chevron-bar-up::before { content: "\f278"; } .bi-chevron-compact-down::before { content: "\f279"; } .bi-chevron-compact-left::before { content: "\f27a"; } .bi-chevron-compact-right::before { content: "\f27b"; } .bi-chevron-compact-up::before { content: "\f27c"; } .bi-chevron-contract::before { content: "\f27d"; } .bi-chevron-double-down::before { content: "\f27e"; } .bi-chevron-double-left::before { content: "\f27f"; } .bi-chevron-double-right::before { content: "\f280"; } .bi-chevron-double-up::before { content: "\f281"; } .bi-chevron-down::before { content: "\f282"; } .bi-chevron-expand::before { content: "\f283"; } .bi-chevron-left::before { content: "\f284"; } .bi-chevron-right::before { content: "\f285"; } .bi-chevron-up::before { content: "\f286"; } .bi-circle-fill::before { content: "\f287"; } .bi-circle-half::before { content: "\f288"; } .bi-circle-square::before { content: "\f289"; } .bi-circle::before { content: "\f28a"; } .bi-clipboard-check::before { content: "\f28b"; } .bi-clipboard-data::before { content: "\f28c"; } .bi-clipboard-minus::before { content: "\f28d"; } .bi-clipboard-plus::before { content: "\f28e"; } .bi-clipboard-x::before { content: "\f28f"; } .bi-clipboard::before { content: "\f290"; } .bi-clock-fill::before { content: "\f291"; } .bi-clock-history::before { content: "\f292"; } .bi-clock::before { content: "\f293"; } .bi-cloud-arrow-down-fill::before { content: "\f294"; } .bi-cloud-arrow-down::before { content: "\f295"; } .bi-cloud-arrow-up-fill::before { content: "\f296"; } .bi-cloud-arrow-up::before { content: "\f297"; } .bi-cloud-check-fill::before { content: "\f298"; } .bi-cloud-check::before { content: "\f299"; } .bi-cloud-download-fill::before { content: "\f29a"; } .bi-cloud-download::before { content: "\f29b"; } .bi-cloud-drizzle-fill::before { content: "\f29c"; } .bi-cloud-drizzle::before { content: "\f29d"; } .bi-cloud-fill::before { content: "\f29e"; } .bi-cloud-fog-fill::before { content: "\f29f"; } .bi-cloud-fog::before { content: "\f2a0"; } .bi-cloud-fog2-fill::before { content: "\f2a1"; } .bi-cloud-fog2::before { content: "\f2a2"; } .bi-cloud-hail-fill::before { content: "\f2a3"; } .bi-cloud-hail::before { content: "\f2a4"; } .bi-cloud-haze-1::before { content: "\f2a5"; } .bi-cloud-haze-fill::before { content: "\f2a6"; } .bi-cloud-haze::before { content: "\f2a7"; } .bi-cloud-haze2-fill::before { content: "\f2a8"; } 
.bi-cloud-lightning-fill::before { content: "\f2a9"; } .bi-cloud-lightning-rain-fill::before { content: "\f2aa"; } .bi-cloud-lightning-rain::before { content: "\f2ab"; } .bi-cloud-lightning::before { content: "\f2ac"; } .bi-cloud-minus-fill::before { content: "\f2ad"; } .bi-cloud-minus::before { content: "\f2ae"; } .bi-cloud-moon-fill::before { content: "\f2af"; } .bi-cloud-moon::before { content: "\f2b0"; } .bi-cloud-plus-fill::before { content: "\f2b1"; } .bi-cloud-plus::before { content: "\f2b2"; } .bi-cloud-rain-fill::before { content: "\f2b3"; } .bi-cloud-rain-heavy-fill::before { content: "\f2b4"; } .bi-cloud-rain-heavy::before { content: "\f2b5"; } .bi-cloud-rain::before { content: "\f2b6"; } .bi-cloud-slash-fill::before { content: "\f2b7"; } .bi-cloud-slash::before { content: "\f2b8"; } .bi-cloud-sleet-fill::before { content: "\f2b9"; } .bi-cloud-sleet::before { content: "\f2ba"; } .bi-cloud-snow-fill::before { content: "\f2bb"; } .bi-cloud-snow::before { content: "\f2bc"; } .bi-cloud-sun-fill::before { content: "\f2bd"; } .bi-cloud-sun::before { content: "\f2be"; } .bi-cloud-upload-fill::before { content: "\f2bf"; } .bi-cloud-upload::before { content: "\f2c0"; } .bi-cloud::before { content: "\f2c1"; } .bi-clouds-fill::before { content: "\f2c2"; } .bi-clouds::before { content: "\f2c3"; } .bi-cloudy-fill::before { content: "\f2c4"; } .bi-cloudy::before { content: "\f2c5"; } .bi-code-slash::before { content: "\f2c6"; } .bi-code-square::before { content: "\f2c7"; } .bi-code::before { content: "\f2c8"; } .bi-collection-fill::before { content: "\f2c9"; } .bi-collection-play-fill::before { content: "\f2ca"; } .bi-collection-play::before { content: "\f2cb"; } .bi-collection::before { content: "\f2cc"; } .bi-columns-gap::before { content: "\f2cd"; } .bi-columns::before { content: "\f2ce"; } .bi-command::before { content: "\f2cf"; } .bi-compass-fill::before { content: "\f2d0"; } .bi-compass::before { content: "\f2d1"; } .bi-cone-striped::before { content: "\f2d2"; } .bi-cone::before { content: "\f2d3"; } .bi-controller::before { content: "\f2d4"; } .bi-cpu-fill::before { content: "\f2d5"; } .bi-cpu::before { content: "\f2d6"; } .bi-credit-card-2-back-fill::before { content: "\f2d7"; } .bi-credit-card-2-back::before { content: "\f2d8"; } .bi-credit-card-2-front-fill::before { content: "\f2d9"; } .bi-credit-card-2-front::before { content: "\f2da"; } .bi-credit-card-fill::before { content: "\f2db"; } .bi-credit-card::before { content: "\f2dc"; } .bi-crop::before { content: "\f2dd"; } .bi-cup-fill::before { content: "\f2de"; } .bi-cup-straw::before { content: "\f2df"; } .bi-cup::before { content: "\f2e0"; } .bi-cursor-fill::before { content: "\f2e1"; } .bi-cursor-text::before { content: "\f2e2"; } .bi-cursor::before { content: "\f2e3"; } .bi-dash-circle-dotted::before { content: "\f2e4"; } .bi-dash-circle-fill::before { content: "\f2e5"; } .bi-dash-circle::before { content: "\f2e6"; } .bi-dash-square-dotted::before { content: "\f2e7"; } .bi-dash-square-fill::before { content: "\f2e8"; } .bi-dash-square::before { content: "\f2e9"; } .bi-dash::before { content: "\f2ea"; } .bi-diagram-2-fill::before { content: "\f2eb"; } .bi-diagram-2::before { content: "\f2ec"; } .bi-diagram-3-fill::before { content: "\f2ed"; } .bi-diagram-3::before { content: "\f2ee"; } .bi-diamond-fill::before { content: "\f2ef"; } .bi-diamond-half::before { content: "\f2f0"; } .bi-diamond::before { content: "\f2f1"; } .bi-dice-1-fill::before { content: "\f2f2"; } .bi-dice-1::before { content: "\f2f3"; } .bi-dice-2-fill::before 
{ content: "\f2f4"; } .bi-dice-2::before { content: "\f2f5"; } .bi-dice-3-fill::before { content: "\f2f6"; } .bi-dice-3::before { content: "\f2f7"; } .bi-dice-4-fill::before { content: "\f2f8"; } .bi-dice-4::before { content: "\f2f9"; } .bi-dice-5-fill::before { content: "\f2fa"; } .bi-dice-5::before { content: "\f2fb"; } .bi-dice-6-fill::before { content: "\f2fc"; } .bi-dice-6::before { content: "\f2fd"; } .bi-disc-fill::before { content: "\f2fe"; } .bi-disc::before { content: "\f2ff"; } .bi-discord::before { content: "\f300"; } .bi-display-fill::before { content: "\f301"; } .bi-display::before { content: "\f302"; } .bi-distribute-horizontal::before { content: "\f303"; } .bi-distribute-vertical::before { content: "\f304"; } .bi-door-closed-fill::before { content: "\f305"; } .bi-door-closed::before { content: "\f306"; } .bi-door-open-fill::before { content: "\f307"; } .bi-door-open::before { content: "\f308"; } .bi-dot::before { content: "\f309"; } .bi-download::before { content: "\f30a"; } .bi-droplet-fill::before { content: "\f30b"; } .bi-droplet-half::before { content: "\f30c"; } .bi-droplet::before { content: "\f30d"; } .bi-earbuds::before { content: "\f30e"; } .bi-easel-fill::before { content: "\f30f"; } .bi-easel::before { content: "\f310"; } .bi-egg-fill::before { content: "\f311"; } .bi-egg-fried::before { content: "\f312"; } .bi-egg::before { content: "\f313"; } .bi-eject-fill::before { content: "\f314"; } .bi-eject::before { content: "\f315"; } .bi-emoji-angry-fill::before { content: "\f316"; } .bi-emoji-angry::before { content: "\f317"; } .bi-emoji-dizzy-fill::before { content: "\f318"; } .bi-emoji-dizzy::before { content: "\f319"; } .bi-emoji-expressionless-fill::before { content: "\f31a"; } .bi-emoji-expressionless::before { content: "\f31b"; } .bi-emoji-frown-fill::before { content: "\f31c"; } .bi-emoji-frown::before { content: "\f31d"; } .bi-emoji-heart-eyes-fill::before { content: "\f31e"; } .bi-emoji-heart-eyes::before { content: "\f31f"; } .bi-emoji-laughing-fill::before { content: "\f320"; } .bi-emoji-laughing::before { content: "\f321"; } .bi-emoji-neutral-fill::before { content: "\f322"; } .bi-emoji-neutral::before { content: "\f323"; } .bi-emoji-smile-fill::before { content: "\f324"; } .bi-emoji-smile-upside-down-fill::before { content: "\f325"; } .bi-emoji-smile-upside-down::before { content: "\f326"; } .bi-emoji-smile::before { content: "\f327"; } .bi-emoji-sunglasses-fill::before { content: "\f328"; } .bi-emoji-sunglasses::before { content: "\f329"; } .bi-emoji-wink-fill::before { content: "\f32a"; } .bi-emoji-wink::before { content: "\f32b"; } .bi-envelope-fill::before { content: "\f32c"; } .bi-envelope-open-fill::before { content: "\f32d"; } .bi-envelope-open::before { content: "\f32e"; } .bi-envelope::before { content: "\f32f"; } .bi-eraser-fill::before { content: "\f330"; } .bi-eraser::before { content: "\f331"; } .bi-exclamation-circle-fill::before { content: "\f332"; } .bi-exclamation-circle::before { content: "\f333"; } .bi-exclamation-diamond-fill::before { content: "\f334"; } .bi-exclamation-diamond::before { content: "\f335"; } .bi-exclamation-octagon-fill::before { content: "\f336"; } .bi-exclamation-octagon::before { content: "\f337"; } .bi-exclamation-square-fill::before { content: "\f338"; } .bi-exclamation-square::before { content: "\f339"; } .bi-exclamation-triangle-fill::before { content: "\f33a"; } .bi-exclamation-triangle::before { content: "\f33b"; } .bi-exclamation::before { content: "\f33c"; } .bi-exclude::before { content: "\f33d"; } 
.bi-eye-fill::before { content: "\f33e"; } .bi-eye-slash-fill::before { content: "\f33f"; } .bi-eye-slash::before { content: "\f340"; } .bi-eye::before { content: "\f341"; } .bi-eyedropper::before { content: "\f342"; } .bi-eyeglasses::before { content: "\f343"; } .bi-facebook::before { content: "\f344"; } .bi-file-arrow-down-fill::before { content: "\f345"; } .bi-file-arrow-down::before { content: "\f346"; } .bi-file-arrow-up-fill::before { content: "\f347"; } .bi-file-arrow-up::before { content: "\f348"; } .bi-file-bar-graph-fill::before { content: "\f349"; } .bi-file-bar-graph::before { content: "\f34a"; } .bi-file-binary-fill::before { content: "\f34b"; } .bi-file-binary::before { content: "\f34c"; } .bi-file-break-fill::before { content: "\f34d"; } .bi-file-break::before { content: "\f34e"; } .bi-file-check-fill::before { content: "\f34f"; } .bi-file-check::before { content: "\f350"; } .bi-file-code-fill::before { content: "\f351"; } .bi-file-code::before { content: "\f352"; } .bi-file-diff-fill::before { content: "\f353"; } .bi-file-diff::before { content: "\f354"; } .bi-file-earmark-arrow-down-fill::before { content: "\f355"; } .bi-file-earmark-arrow-down::before { content: "\f356"; } .bi-file-earmark-arrow-up-fill::before { content: "\f357"; } .bi-file-earmark-arrow-up::before { content: "\f358"; } .bi-file-earmark-bar-graph-fill::before { content: "\f359"; } .bi-file-earmark-bar-graph::before { content: "\f35a"; } .bi-file-earmark-binary-fill::before { content: "\f35b"; } .bi-file-earmark-binary::before { content: "\f35c"; } .bi-file-earmark-break-fill::before { content: "\f35d"; } .bi-file-earmark-break::before { content: "\f35e"; } .bi-file-earmark-check-fill::before { content: "\f35f"; } .bi-file-earmark-check::before { content: "\f360"; } .bi-file-earmark-code-fill::before { content: "\f361"; } .bi-file-earmark-code::before { content: "\f362"; } .bi-file-earmark-diff-fill::before { content: "\f363"; } .bi-file-earmark-diff::before { content: "\f364"; } .bi-file-earmark-easel-fill::before { content: "\f365"; } .bi-file-earmark-easel::before { content: "\f366"; } .bi-file-earmark-excel-fill::before { content: "\f367"; } .bi-file-earmark-excel::before { content: "\f368"; } .bi-file-earmark-fill::before { content: "\f369"; } .bi-file-earmark-font-fill::before { content: "\f36a"; } .bi-file-earmark-font::before { content: "\f36b"; } .bi-file-earmark-image-fill::before { content: "\f36c"; } .bi-file-earmark-image::before { content: "\f36d"; } .bi-file-earmark-lock-fill::before { content: "\f36e"; } .bi-file-earmark-lock::before { content: "\f36f"; } .bi-file-earmark-lock2-fill::before { content: "\f370"; } .bi-file-earmark-lock2::before { content: "\f371"; } .bi-file-earmark-medical-fill::before { content: "\f372"; } .bi-file-earmark-medical::before { content: "\f373"; } .bi-file-earmark-minus-fill::before { content: "\f374"; } .bi-file-earmark-minus::before { content: "\f375"; } .bi-file-earmark-music-fill::before { content: "\f376"; } .bi-file-earmark-music::before { content: "\f377"; } .bi-file-earmark-person-fill::before { content: "\f378"; } .bi-file-earmark-person::before { content: "\f379"; } .bi-file-earmark-play-fill::before { content: "\f37a"; } .bi-file-earmark-play::before { content: "\f37b"; } .bi-file-earmark-plus-fill::before { content: "\f37c"; } .bi-file-earmark-plus::before { content: "\f37d"; } .bi-file-earmark-post-fill::before { content: "\f37e"; } .bi-file-earmark-post::before { content: "\f37f"; } .bi-file-earmark-ppt-fill::before { content: "\f380"; } 
.bi-file-earmark-ppt::before { content: "\f381"; } .bi-file-earmark-richtext-fill::before { content: "\f382"; } .bi-file-earmark-richtext::before { content: "\f383"; } .bi-file-earmark-ruled-fill::before { content: "\f384"; } .bi-file-earmark-ruled::before { content: "\f385"; } .bi-file-earmark-slides-fill::before { content: "\f386"; } .bi-file-earmark-slides::before { content: "\f387"; } .bi-file-earmark-spreadsheet-fill::before { content: "\f388"; } .bi-file-earmark-spreadsheet::before { content: "\f389"; } .bi-file-earmark-text-fill::before { content: "\f38a"; } .bi-file-earmark-text::before { content: "\f38b"; } .bi-file-earmark-word-fill::before { content: "\f38c"; } .bi-file-earmark-word::before { content: "\f38d"; } .bi-file-earmark-x-fill::before { content: "\f38e"; } .bi-file-earmark-x::before { content: "\f38f"; } .bi-file-earmark-zip-fill::before { content: "\f390"; } .bi-file-earmark-zip::before { content: "\f391"; } .bi-file-earmark::before { content: "\f392"; } .bi-file-easel-fill::before { content: "\f393"; } .bi-file-easel::before { content: "\f394"; } .bi-file-excel-fill::before { content: "\f395"; } .bi-file-excel::before { content: "\f396"; } .bi-file-fill::before { content: "\f397"; } .bi-file-font-fill::before { content: "\f398"; } .bi-file-font::before { content: "\f399"; } .bi-file-image-fill::before { content: "\f39a"; } .bi-file-image::before { content: "\f39b"; } .bi-file-lock-fill::before { content: "\f39c"; } .bi-file-lock::before { content: "\f39d"; } .bi-file-lock2-fill::before { content: "\f39e"; } .bi-file-lock2::before { content: "\f39f"; } .bi-file-medical-fill::before { content: "\f3a0"; } .bi-file-medical::before { content: "\f3a1"; } .bi-file-minus-fill::before { content: "\f3a2"; } .bi-file-minus::before { content: "\f3a3"; } .bi-file-music-fill::before { content: "\f3a4"; } .bi-file-music::before { content: "\f3a5"; } .bi-file-person-fill::before { content: "\f3a6"; } .bi-file-person::before { content: "\f3a7"; } .bi-file-play-fill::before { content: "\f3a8"; } .bi-file-play::before { content: "\f3a9"; } .bi-file-plus-fill::before { content: "\f3aa"; } .bi-file-plus::before { content: "\f3ab"; } .bi-file-post-fill::before { content: "\f3ac"; } .bi-file-post::before { content: "\f3ad"; } .bi-file-ppt-fill::before { content: "\f3ae"; } .bi-file-ppt::before { content: "\f3af"; } .bi-file-richtext-fill::before { content: "\f3b0"; } .bi-file-richtext::before { content: "\f3b1"; } .bi-file-ruled-fill::before { content: "\f3b2"; } .bi-file-ruled::before { content: "\f3b3"; } .bi-file-slides-fill::before { content: "\f3b4"; } .bi-file-slides::before { content: "\f3b5"; } .bi-file-spreadsheet-fill::before { content: "\f3b6"; } .bi-file-spreadsheet::before { content: "\f3b7"; } .bi-file-text-fill::before { content: "\f3b8"; } .bi-file-text::before { content: "\f3b9"; } .bi-file-word-fill::before { content: "\f3ba"; } .bi-file-word::before { content: "\f3bb"; } .bi-file-x-fill::before { content: "\f3bc"; } .bi-file-x::before { content: "\f3bd"; } .bi-file-zip-fill::before { content: "\f3be"; } .bi-file-zip::before { content: "\f3bf"; } .bi-file::before { content: "\f3c0"; } .bi-files-alt::before { content: "\f3c1"; } .bi-files::before { content: "\f3c2"; } .bi-film::before { content: "\f3c3"; } .bi-filter-circle-fill::before { content: "\f3c4"; } .bi-filter-circle::before { content: "\f3c5"; } .bi-filter-left::before { content: "\f3c6"; } .bi-filter-right::before { content: "\f3c7"; } .bi-filter-square-fill::before { content: "\f3c8"; } .bi-filter-square::before 
{ content: "\f3c9"; } .bi-filter::before { content: "\f3ca"; } .bi-flag-fill::before { content: "\f3cb"; } .bi-flag::before { content: "\f3cc"; } .bi-flower1::before { content: "\f3cd"; } .bi-flower2::before { content: "\f3ce"; } .bi-flower3::before { content: "\f3cf"; } .bi-folder-check::before { content: "\f3d0"; } .bi-folder-fill::before { content: "\f3d1"; } .bi-folder-minus::before { content: "\f3d2"; } .bi-folder-plus::before { content: "\f3d3"; } .bi-folder-symlink-fill::before { content: "\f3d4"; } .bi-folder-symlink::before { content: "\f3d5"; } .bi-folder-x::before { content: "\f3d6"; } .bi-folder::before { content: "\f3d7"; } .bi-folder2-open::before { content: "\f3d8"; } .bi-folder2::before { content: "\f3d9"; } .bi-fonts::before { content: "\f3da"; } .bi-forward-fill::before { content: "\f3db"; } .bi-forward::before { content: "\f3dc"; } .bi-front::before { content: "\f3dd"; } .bi-fullscreen-exit::before { content: "\f3de"; } .bi-fullscreen::before { content: "\f3df"; } .bi-funnel-fill::before { content: "\f3e0"; } .bi-funnel::before { content: "\f3e1"; } .bi-gear-fill::before { content: "\f3e2"; } .bi-gear-wide-connected::before { content: "\f3e3"; } .bi-gear-wide::before { content: "\f3e4"; } .bi-gear::before { content: "\f3e5"; } .bi-gem::before { content: "\f3e6"; } .bi-geo-alt-fill::before { content: "\f3e7"; } .bi-geo-alt::before { content: "\f3e8"; } .bi-geo-fill::before { content: "\f3e9"; } .bi-geo::before { content: "\f3ea"; } .bi-gift-fill::before { content: "\f3eb"; } .bi-gift::before { content: "\f3ec"; } .bi-github::before { content: "\f3ed"; } .bi-globe::before { content: "\f3ee"; } .bi-globe2::before { content: "\f3ef"; } .bi-google::before { content: "\f3f0"; } .bi-graph-down::before { content: "\f3f1"; } .bi-graph-up::before { content: "\f3f2"; } .bi-grid-1x2-fill::before { content: "\f3f3"; } .bi-grid-1x2::before { content: "\f3f4"; } .bi-grid-3x2-gap-fill::before { content: "\f3f5"; } .bi-grid-3x2-gap::before { content: "\f3f6"; } .bi-grid-3x2::before { content: "\f3f7"; } .bi-grid-3x3-gap-fill::before { content: "\f3f8"; } .bi-grid-3x3-gap::before { content: "\f3f9"; } .bi-grid-3x3::before { content: "\f3fa"; } .bi-grid-fill::before { content: "\f3fb"; } .bi-grid::before { content: "\f3fc"; } .bi-grip-horizontal::before { content: "\f3fd"; } .bi-grip-vertical::before { content: "\f3fe"; } .bi-hammer::before { content: "\f3ff"; } .bi-hand-index-fill::before { content: "\f400"; } .bi-hand-index-thumb-fill::before { content: "\f401"; } .bi-hand-index-thumb::before { content: "\f402"; } .bi-hand-index::before { content: "\f403"; } .bi-hand-thumbs-down-fill::before { content: "\f404"; } .bi-hand-thumbs-down::before { content: "\f405"; } .bi-hand-thumbs-up-fill::before { content: "\f406"; } .bi-hand-thumbs-up::before { content: "\f407"; } .bi-handbag-fill::before { content: "\f408"; } .bi-handbag::before { content: "\f409"; } .bi-hash::before { content: "\f40a"; } .bi-hdd-fill::before { content: "\f40b"; } .bi-hdd-network-fill::before { content: "\f40c"; } .bi-hdd-network::before { content: "\f40d"; } .bi-hdd-rack-fill::before { content: "\f40e"; } .bi-hdd-rack::before { content: "\f40f"; } .bi-hdd-stack-fill::before { content: "\f410"; } .bi-hdd-stack::before { content: "\f411"; } .bi-hdd::before { content: "\f412"; } .bi-headphones::before { content: "\f413"; } .bi-headset::before { content: "\f414"; } .bi-heart-fill::before { content: "\f415"; } .bi-heart-half::before { content: "\f416"; } .bi-heart::before { content: "\f417"; } .bi-heptagon-fill::before { 
content: "\f418"; } .bi-heptagon-half::before { content: "\f419"; } .bi-heptagon::before { content: "\f41a"; } .bi-hexagon-fill::before { content: "\f41b"; } .bi-hexagon-half::before { content: "\f41c"; } .bi-hexagon::before { content: "\f41d"; } .bi-hourglass-bottom::before { content: "\f41e"; } .bi-hourglass-split::before { content: "\f41f"; } .bi-hourglass-top::before { content: "\f420"; } .bi-hourglass::before { content: "\f421"; } .bi-house-door-fill::before { content: "\f422"; } .bi-house-door::before { content: "\f423"; } .bi-house-fill::before { content: "\f424"; } .bi-house::before { content: "\f425"; } .bi-hr::before { content: "\f426"; } .bi-hurricane::before { content: "\f427"; } .bi-image-alt::before { content: "\f428"; } .bi-image-fill::before { content: "\f429"; } .bi-image::before { content: "\f42a"; } .bi-images::before { content: "\f42b"; } .bi-inbox-fill::before { content: "\f42c"; } .bi-inbox::before { content: "\f42d"; } .bi-inboxes-fill::before { content: "\f42e"; } .bi-inboxes::before { content: "\f42f"; } .bi-info-circle-fill::before { content: "\f430"; } .bi-info-circle::before { content: "\f431"; } .bi-info-square-fill::before { content: "\f432"; } .bi-info-square::before { content: "\f433"; } .bi-info::before { content: "\f434"; } .bi-input-cursor-text::before { content: "\f435"; } .bi-input-cursor::before { content: "\f436"; } .bi-instagram::before { content: "\f437"; } .bi-intersect::before { content: "\f438"; } .bi-journal-album::before { content: "\f439"; } .bi-journal-arrow-down::before { content: "\f43a"; } .bi-journal-arrow-up::before { content: "\f43b"; } .bi-journal-bookmark-fill::before { content: "\f43c"; } .bi-journal-bookmark::before { content: "\f43d"; } .bi-journal-check::before { content: "\f43e"; } .bi-journal-code::before { content: "\f43f"; } .bi-journal-medical::before { content: "\f440"; } .bi-journal-minus::before { content: "\f441"; } .bi-journal-plus::before { content: "\f442"; } .bi-journal-richtext::before { content: "\f443"; } .bi-journal-text::before { content: "\f444"; } .bi-journal-x::before { content: "\f445"; } .bi-journal::before { content: "\f446"; } .bi-journals::before { content: "\f447"; } .bi-joystick::before { content: "\f448"; } .bi-justify-left::before { content: "\f449"; } .bi-justify-right::before { content: "\f44a"; } .bi-justify::before { content: "\f44b"; } .bi-kanban-fill::before { content: "\f44c"; } .bi-kanban::before { content: "\f44d"; } .bi-key-fill::before { content: "\f44e"; } .bi-key::before { content: "\f44f"; } .bi-keyboard-fill::before { content: "\f450"; } .bi-keyboard::before { content: "\f451"; } .bi-ladder::before { content: "\f452"; } .bi-lamp-fill::before { content: "\f453"; } .bi-lamp::before { content: "\f454"; } .bi-laptop-fill::before { content: "\f455"; } .bi-laptop::before { content: "\f456"; } .bi-layer-backward::before { content: "\f457"; } .bi-layer-forward::before { content: "\f458"; } .bi-layers-fill::before { content: "\f459"; } .bi-layers-half::before { content: "\f45a"; } .bi-layers::before { content: "\f45b"; } .bi-layout-sidebar-inset-reverse::before { content: "\f45c"; } .bi-layout-sidebar-inset::before { content: "\f45d"; } .bi-layout-sidebar-reverse::before { content: "\f45e"; } .bi-layout-sidebar::before { content: "\f45f"; } .bi-layout-split::before { content: "\f460"; } .bi-layout-text-sidebar-reverse::before { content: "\f461"; } .bi-layout-text-sidebar::before { content: "\f462"; } .bi-layout-text-window-reverse::before { content: "\f463"; } .bi-layout-text-window::before { 
content: "\f464"; } .bi-layout-three-columns::before { content: "\f465"; } .bi-layout-wtf::before { content: "\f466"; } .bi-life-preserver::before { content: "\f467"; } .bi-lightbulb-fill::before { content: "\f468"; } .bi-lightbulb-off-fill::before { content: "\f469"; } .bi-lightbulb-off::before { content: "\f46a"; } .bi-lightbulb::before { content: "\f46b"; } .bi-lightning-charge-fill::before { content: "\f46c"; } .bi-lightning-charge::before { content: "\f46d"; } .bi-lightning-fill::before { content: "\f46e"; } .bi-lightning::before { content: "\f46f"; } .bi-link-45deg::before { content: "\f470"; } .bi-link::before { content: "\f471"; } .bi-linkedin::before { content: "\f472"; } .bi-list-check::before { content: "\f473"; } .bi-list-nested::before { content: "\f474"; } .bi-list-ol::before { content: "\f475"; } .bi-list-stars::before { content: "\f476"; } .bi-list-task::before { content: "\f477"; } .bi-list-ul::before { content: "\f478"; } .bi-list::before { content: "\f479"; } .bi-lock-fill::before { content: "\f47a"; } .bi-lock::before { content: "\f47b"; } .bi-mailbox::before { content: "\f47c"; } .bi-mailbox2::before { content: "\f47d"; } .bi-map-fill::before { content: "\f47e"; } .bi-map::before { content: "\f47f"; } .bi-markdown-fill::before { content: "\f480"; } .bi-markdown::before { content: "\f481"; } .bi-mask::before { content: "\f482"; } .bi-megaphone-fill::before { content: "\f483"; } .bi-megaphone::before { content: "\f484"; } .bi-menu-app-fill::before { content: "\f485"; } .bi-menu-app::before { content: "\f486"; } .bi-menu-button-fill::before { content: "\f487"; } .bi-menu-button-wide-fill::before { content: "\f488"; } .bi-menu-button-wide::before { content: "\f489"; } .bi-menu-button::before { content: "\f48a"; } .bi-menu-down::before { content: "\f48b"; } .bi-menu-up::before { content: "\f48c"; } .bi-mic-fill::before { content: "\f48d"; } .bi-mic-mute-fill::before { content: "\f48e"; } .bi-mic-mute::before { content: "\f48f"; } .bi-mic::before { content: "\f490"; } .bi-minecart-loaded::before { content: "\f491"; } .bi-minecart::before { content: "\f492"; } .bi-moisture::before { content: "\f493"; } .bi-moon-fill::before { content: "\f494"; } .bi-moon-stars-fill::before { content: "\f495"; } .bi-moon-stars::before { content: "\f496"; } .bi-moon::before { content: "\f497"; } .bi-mouse-fill::before { content: "\f498"; } .bi-mouse::before { content: "\f499"; } .bi-mouse2-fill::before { content: "\f49a"; } .bi-mouse2::before { content: "\f49b"; } .bi-mouse3-fill::before { content: "\f49c"; } .bi-mouse3::before { content: "\f49d"; } .bi-music-note-beamed::before { content: "\f49e"; } .bi-music-note-list::before { content: "\f49f"; } .bi-music-note::before { content: "\f4a0"; } .bi-music-player-fill::before { content: "\f4a1"; } .bi-music-player::before { content: "\f4a2"; } .bi-newspaper::before { content: "\f4a3"; } .bi-node-minus-fill::before { content: "\f4a4"; } .bi-node-minus::before { content: "\f4a5"; } .bi-node-plus-fill::before { content: "\f4a6"; } .bi-node-plus::before { content: "\f4a7"; } .bi-nut-fill::before { content: "\f4a8"; } .bi-nut::before { content: "\f4a9"; } .bi-octagon-fill::before { content: "\f4aa"; } .bi-octagon-half::before { content: "\f4ab"; } .bi-octagon::before { content: "\f4ac"; } .bi-option::before { content: "\f4ad"; } .bi-outlet::before { content: "\f4ae"; } .bi-paint-bucket::before { content: "\f4af"; } .bi-palette-fill::before { content: "\f4b0"; } .bi-palette::before { content: "\f4b1"; } .bi-palette2::before { content: "\f4b2"; } 
.bi-paperclip::before { content: "\f4b3"; } .bi-paragraph::before { content: "\f4b4"; } .bi-patch-check-fill::before { content: "\f4b5"; } .bi-patch-check::before { content: "\f4b6"; } .bi-patch-exclamation-fill::before { content: "\f4b7"; } .bi-patch-exclamation::before { content: "\f4b8"; } .bi-patch-minus-fill::before { content: "\f4b9"; } .bi-patch-minus::before { content: "\f4ba"; } .bi-patch-plus-fill::before { content: "\f4bb"; } .bi-patch-plus::before { content: "\f4bc"; } .bi-patch-question-fill::before { content: "\f4bd"; } .bi-patch-question::before { content: "\f4be"; } .bi-pause-btn-fill::before { content: "\f4bf"; } .bi-pause-btn::before { content: "\f4c0"; } .bi-pause-circle-fill::before { content: "\f4c1"; } .bi-pause-circle::before { content: "\f4c2"; } .bi-pause-fill::before { content: "\f4c3"; } .bi-pause::before { content: "\f4c4"; } .bi-peace-fill::before { content: "\f4c5"; } .bi-peace::before { content: "\f4c6"; } .bi-pen-fill::before { content: "\f4c7"; } .bi-pen::before { content: "\f4c8"; } .bi-pencil-fill::before { content: "\f4c9"; } .bi-pencil-square::before { content: "\f4ca"; } .bi-pencil::before { content: "\f4cb"; } .bi-pentagon-fill::before { content: "\f4cc"; } .bi-pentagon-half::before { content: "\f4cd"; } .bi-pentagon::before { content: "\f4ce"; } .bi-people-fill::before { content: "\f4cf"; } .bi-people::before { content: "\f4d0"; } .bi-percent::before { content: "\f4d1"; } .bi-person-badge-fill::before { content: "\f4d2"; } .bi-person-badge::before { content: "\f4d3"; } .bi-person-bounding-box::before { content: "\f4d4"; } .bi-person-check-fill::before { content: "\f4d5"; } .bi-person-check::before { content: "\f4d6"; } .bi-person-circle::before { content: "\f4d7"; } .bi-person-dash-fill::before { content: "\f4d8"; } .bi-person-dash::before { content: "\f4d9"; } .bi-person-fill::before { content: "\f4da"; } .bi-person-lines-fill::before { content: "\f4db"; } .bi-person-plus-fill::before { content: "\f4dc"; } .bi-person-plus::before { content: "\f4dd"; } .bi-person-square::before { content: "\f4de"; } .bi-person-x-fill::before { content: "\f4df"; } .bi-person-x::before { content: "\f4e0"; } .bi-person::before { content: "\f4e1"; } .bi-phone-fill::before { content: "\f4e2"; } .bi-phone-landscape-fill::before { content: "\f4e3"; } .bi-phone-landscape::before { content: "\f4e4"; } .bi-phone-vibrate-fill::before { content: "\f4e5"; } .bi-phone-vibrate::before { content: "\f4e6"; } .bi-phone::before { content: "\f4e7"; } .bi-pie-chart-fill::before { content: "\f4e8"; } .bi-pie-chart::before { content: "\f4e9"; } .bi-pin-angle-fill::before { content: "\f4ea"; } .bi-pin-angle::before { content: "\f4eb"; } .bi-pin-fill::before { content: "\f4ec"; } .bi-pin::before { content: "\f4ed"; } .bi-pip-fill::before { content: "\f4ee"; } .bi-pip::before { content: "\f4ef"; } .bi-play-btn-fill::before { content: "\f4f0"; } .bi-play-btn::before { content: "\f4f1"; } .bi-play-circle-fill::before { content: "\f4f2"; } .bi-play-circle::before { content: "\f4f3"; } .bi-play-fill::before { content: "\f4f4"; } .bi-play::before { content: "\f4f5"; } .bi-plug-fill::before { content: "\f4f6"; } .bi-plug::before { content: "\f4f7"; } .bi-plus-circle-dotted::before { content: "\f4f8"; } .bi-plus-circle-fill::before { content: "\f4f9"; } .bi-plus-circle::before { content: "\f4fa"; } .bi-plus-square-dotted::before { content: "\f4fb"; } .bi-plus-square-fill::before { content: "\f4fc"; } .bi-plus-square::before { content: "\f4fd"; } .bi-plus::before { content: "\f4fe"; } 
.bi-power::before { content: "\f4ff"; } .bi-printer-fill::before { content: "\f500"; } .bi-printer::before { content: "\f501"; } .bi-puzzle-fill::before { content: "\f502"; } .bi-puzzle::before { content: "\f503"; } .bi-question-circle-fill::before { content: "\f504"; } .bi-question-circle::before { content: "\f505"; } .bi-question-diamond-fill::before { content: "\f506"; } .bi-question-diamond::before { content: "\f507"; } .bi-question-octagon-fill::before { content: "\f508"; } .bi-question-octagon::before { content: "\f509"; } .bi-question-square-fill::before { content: "\f50a"; } .bi-question-square::before { content: "\f50b"; } .bi-question::before { content: "\f50c"; } .bi-rainbow::before { content: "\f50d"; } .bi-receipt-cutoff::before { content: "\f50e"; } .bi-receipt::before { content: "\f50f"; } .bi-reception-0::before { content: "\f510"; } .bi-reception-1::before { content: "\f511"; } .bi-reception-2::before { content: "\f512"; } .bi-reception-3::before { content: "\f513"; } .bi-reception-4::before { content: "\f514"; } .bi-record-btn-fill::before { content: "\f515"; } .bi-record-btn::before { content: "\f516"; } .bi-record-circle-fill::before { content: "\f517"; } .bi-record-circle::before { content: "\f518"; } .bi-record-fill::before { content: "\f519"; } .bi-record::before { content: "\f51a"; } .bi-record2-fill::before { content: "\f51b"; } .bi-record2::before { content: "\f51c"; } .bi-reply-all-fill::before { content: "\f51d"; } .bi-reply-all::before { content: "\f51e"; } .bi-reply-fill::before { content: "\f51f"; } .bi-reply::before { content: "\f520"; } .bi-rss-fill::before { content: "\f521"; } .bi-rss::before { content: "\f522"; } .bi-rulers::before { content: "\f523"; } .bi-save-fill::before { content: "\f524"; } .bi-save::before { content: "\f525"; } .bi-save2-fill::before { content: "\f526"; } .bi-save2::before { content: "\f527"; } .bi-scissors::before { content: "\f528"; } .bi-screwdriver::before { content: "\f529"; } .bi-search::before { content: "\f52a"; } .bi-segmented-nav::before { content: "\f52b"; } .bi-server::before { content: "\f52c"; } .bi-share-fill::before { content: "\f52d"; } .bi-share::before { content: "\f52e"; } .bi-shield-check::before { content: "\f52f"; } .bi-shield-exclamation::before { content: "\f530"; } .bi-shield-fill-check::before { content: "\f531"; } .bi-shield-fill-exclamation::before { content: "\f532"; } .bi-shield-fill-minus::before { content: "\f533"; } .bi-shield-fill-plus::before { content: "\f534"; } .bi-shield-fill-x::before { content: "\f535"; } .bi-shield-fill::before { content: "\f536"; } .bi-shield-lock-fill::before { content: "\f537"; } .bi-shield-lock::before { content: "\f538"; } .bi-shield-minus::before { content: "\f539"; } .bi-shield-plus::before { content: "\f53a"; } .bi-shield-shaded::before { content: "\f53b"; } .bi-shield-slash-fill::before { content: "\f53c"; } .bi-shield-slash::before { content: "\f53d"; } .bi-shield-x::before { content: "\f53e"; } .bi-shield::before { content: "\f53f"; } .bi-shift-fill::before { content: "\f540"; } .bi-shift::before { content: "\f541"; } .bi-shop-window::before { content: "\f542"; } .bi-shop::before { content: "\f543"; } .bi-shuffle::before { content: "\f544"; } .bi-signpost-2-fill::before { content: "\f545"; } .bi-signpost-2::before { content: "\f546"; } .bi-signpost-fill::before { content: "\f547"; } .bi-signpost-split-fill::before { content: "\f548"; } .bi-signpost-split::before { content: "\f549"; } .bi-signpost::before { content: "\f54a"; } .bi-sim-fill::before { content: 
"\f54b"; } .bi-sim::before { content: "\f54c"; } .bi-skip-backward-btn-fill::before { content: "\f54d"; } .bi-skip-backward-btn::before { content: "\f54e"; } .bi-skip-backward-circle-fill::before { content: "\f54f"; } .bi-skip-backward-circle::before { content: "\f550"; } .bi-skip-backward-fill::before { content: "\f551"; } .bi-skip-backward::before { content: "\f552"; } .bi-skip-end-btn-fill::before { content: "\f553"; } .bi-skip-end-btn::before { content: "\f554"; } .bi-skip-end-circle-fill::before { content: "\f555"; } .bi-skip-end-circle::before { content: "\f556"; } .bi-skip-end-fill::before { content: "\f557"; } .bi-skip-end::before { content: "\f558"; } .bi-skip-forward-btn-fill::before { content: "\f559"; } .bi-skip-forward-btn::before { content: "\f55a"; } .bi-skip-forward-circle-fill::before { content: "\f55b"; } .bi-skip-forward-circle::before { content: "\f55c"; } .bi-skip-forward-fill::before { content: "\f55d"; } .bi-skip-forward::before { content: "\f55e"; } .bi-skip-start-btn-fill::before { content: "\f55f"; } .bi-skip-start-btn::before { content: "\f560"; } .bi-skip-start-circle-fill::before { content: "\f561"; } .bi-skip-start-circle::before { content: "\f562"; } .bi-skip-start-fill::before { content: "\f563"; } .bi-skip-start::before { content: "\f564"; } .bi-slack::before { content: "\f565"; } .bi-slash-circle-fill::before { content: "\f566"; } .bi-slash-circle::before { content: "\f567"; } .bi-slash-square-fill::before { content: "\f568"; } .bi-slash-square::before { content: "\f569"; } .bi-slash::before { content: "\f56a"; } .bi-sliders::before { content: "\f56b"; } .bi-smartwatch::before { content: "\f56c"; } .bi-snow::before { content: "\f56d"; } .bi-snow2::before { content: "\f56e"; } .bi-snow3::before { content: "\f56f"; } .bi-sort-alpha-down-alt::before { content: "\f570"; } .bi-sort-alpha-down::before { content: "\f571"; } .bi-sort-alpha-up-alt::before { content: "\f572"; } .bi-sort-alpha-up::before { content: "\f573"; } .bi-sort-down-alt::before { content: "\f574"; } .bi-sort-down::before { content: "\f575"; } .bi-sort-numeric-down-alt::before { content: "\f576"; } .bi-sort-numeric-down::before { content: "\f577"; } .bi-sort-numeric-up-alt::before { content: "\f578"; } .bi-sort-numeric-up::before { content: "\f579"; } .bi-sort-up-alt::before { content: "\f57a"; } .bi-sort-up::before { content: "\f57b"; } .bi-soundwave::before { content: "\f57c"; } .bi-speaker-fill::before { content: "\f57d"; } .bi-speaker::before { content: "\f57e"; } .bi-speedometer::before { content: "\f57f"; } .bi-speedometer2::before { content: "\f580"; } .bi-spellcheck::before { content: "\f581"; } .bi-square-fill::before { content: "\f582"; } .bi-square-half::before { content: "\f583"; } .bi-square::before { content: "\f584"; } .bi-stack::before { content: "\f585"; } .bi-star-fill::before { content: "\f586"; } .bi-star-half::before { content: "\f587"; } .bi-star::before { content: "\f588"; } .bi-stars::before { content: "\f589"; } .bi-stickies-fill::before { content: "\f58a"; } .bi-stickies::before { content: "\f58b"; } .bi-sticky-fill::before { content: "\f58c"; } .bi-sticky::before { content: "\f58d"; } .bi-stop-btn-fill::before { content: "\f58e"; } .bi-stop-btn::before { content: "\f58f"; } .bi-stop-circle-fill::before { content: "\f590"; } .bi-stop-circle::before { content: "\f591"; } .bi-stop-fill::before { content: "\f592"; } .bi-stop::before { content: "\f593"; } .bi-stoplights-fill::before { content: "\f594"; } .bi-stoplights::before { content: "\f595"; } 
.bi-stopwatch-fill::before { content: "\f596"; } .bi-stopwatch::before { content: "\f597"; } .bi-subtract::before { content: "\f598"; } .bi-suit-club-fill::before { content: "\f599"; } .bi-suit-club::before { content: "\f59a"; } .bi-suit-diamond-fill::before { content: "\f59b"; } .bi-suit-diamond::before { content: "\f59c"; } .bi-suit-heart-fill::before { content: "\f59d"; } .bi-suit-heart::before { content: "\f59e"; } .bi-suit-spade-fill::before { content: "\f59f"; } .bi-suit-spade::before { content: "\f5a0"; } .bi-sun-fill::before { content: "\f5a1"; } .bi-sun::before { content: "\f5a2"; } .bi-sunglasses::before { content: "\f5a3"; } .bi-sunrise-fill::before { content: "\f5a4"; } .bi-sunrise::before { content: "\f5a5"; } .bi-sunset-fill::before { content: "\f5a6"; } .bi-sunset::before { content: "\f5a7"; } .bi-symmetry-horizontal::before { content: "\f5a8"; } .bi-symmetry-vertical::before { content: "\f5a9"; } .bi-table::before { content: "\f5aa"; } .bi-tablet-fill::before { content: "\f5ab"; } .bi-tablet-landscape-fill::before { content: "\f5ac"; } .bi-tablet-landscape::before { content: "\f5ad"; } .bi-tablet::before { content: "\f5ae"; } .bi-tag-fill::before { content: "\f5af"; } .bi-tag::before { content: "\f5b0"; } .bi-tags-fill::before { content: "\f5b1"; } .bi-tags::before { content: "\f5b2"; } .bi-telegram::before { content: "\f5b3"; } .bi-telephone-fill::before { content: "\f5b4"; } .bi-telephone-forward-fill::before { content: "\f5b5"; } .bi-telephone-forward::before { content: "\f5b6"; } .bi-telephone-inbound-fill::before { content: "\f5b7"; } .bi-telephone-inbound::before { content: "\f5b8"; } .bi-telephone-minus-fill::before { content: "\f5b9"; } .bi-telephone-minus::before { content: "\f5ba"; } .bi-telephone-outbound-fill::before { content: "\f5bb"; } .bi-telephone-outbound::before { content: "\f5bc"; } .bi-telephone-plus-fill::before { content: "\f5bd"; } .bi-telephone-plus::before { content: "\f5be"; } .bi-telephone-x-fill::before { content: "\f5bf"; } .bi-telephone-x::before { content: "\f5c0"; } .bi-telephone::before { content: "\f5c1"; } .bi-terminal-fill::before { content: "\f5c2"; } .bi-terminal::before { content: "\f5c3"; } .bi-text-center::before { content: "\f5c4"; } .bi-text-indent-left::before { content: "\f5c5"; } .bi-text-indent-right::before { content: "\f5c6"; } .bi-text-left::before { content: "\f5c7"; } .bi-text-paragraph::before { content: "\f5c8"; } .bi-text-right::before { content: "\f5c9"; } .bi-textarea-resize::before { content: "\f5ca"; } .bi-textarea-t::before { content: "\f5cb"; } .bi-textarea::before { content: "\f5cc"; } .bi-thermometer-half::before { content: "\f5cd"; } .bi-thermometer-high::before { content: "\f5ce"; } .bi-thermometer-low::before { content: "\f5cf"; } .bi-thermometer-snow::before { content: "\f5d0"; } .bi-thermometer-sun::before { content: "\f5d1"; } .bi-thermometer::before { content: "\f5d2"; } .bi-three-dots-vertical::before { content: "\f5d3"; } .bi-three-dots::before { content: "\f5d4"; } .bi-toggle-off::before { content: "\f5d5"; } .bi-toggle-on::before { content: "\f5d6"; } .bi-toggle2-off::before { content: "\f5d7"; } .bi-toggle2-on::before { content: "\f5d8"; } .bi-toggles::before { content: "\f5d9"; } .bi-toggles2::before { content: "\f5da"; } .bi-tools::before { content: "\f5db"; } .bi-tornado::before { content: "\f5dc"; } .bi-trash-fill::before { content: "\f5dd"; } .bi-trash::before { content: "\f5de"; } .bi-trash2-fill::before { content: "\f5df"; } .bi-trash2::before { content: "\f5e0"; } .bi-tree-fill::before { 
content: "\f5e1"; } .bi-tree::before { content: "\f5e2"; } .bi-triangle-fill::before { content: "\f5e3"; } .bi-triangle-half::before { content: "\f5e4"; } .bi-triangle::before { content: "\f5e5"; } .bi-trophy-fill::before { content: "\f5e6"; } .bi-trophy::before { content: "\f5e7"; } .bi-tropical-storm::before { content: "\f5e8"; } .bi-truck-flatbed::before { content: "\f5e9"; } .bi-truck::before { content: "\f5ea"; } .bi-tsunami::before { content: "\f5eb"; } .bi-tv-fill::before { content: "\f5ec"; } .bi-tv::before { content: "\f5ed"; } .bi-twitch::before { content: "\f5ee"; } .bi-twitter::before { content: "\f5ef"; } .bi-type-bold::before { content: "\f5f0"; } .bi-type-h1::before { content: "\f5f1"; } .bi-type-h2::before { content: "\f5f2"; } .bi-type-h3::before { content: "\f5f3"; } .bi-type-italic::before { content: "\f5f4"; } .bi-type-strikethrough::before { content: "\f5f5"; } .bi-type-underline::before { content: "\f5f6"; } .bi-type::before { content: "\f5f7"; } .bi-ui-checks-grid::before { content: "\f5f8"; } .bi-ui-checks::before { content: "\f5f9"; } .bi-ui-radios-grid::before { content: "\f5fa"; } .bi-ui-radios::before { content: "\f5fb"; } .bi-umbrella-fill::before { content: "\f5fc"; } .bi-umbrella::before { content: "\f5fd"; } .bi-union::before { content: "\f5fe"; } .bi-unlock-fill::before { content: "\f5ff"; } .bi-unlock::before { content: "\f600"; } .bi-upc-scan::before { content: "\f601"; } .bi-upc::before { content: "\f602"; } .bi-upload::before { content: "\f603"; } .bi-vector-pen::before { content: "\f604"; } .bi-view-list::before { content: "\f605"; } .bi-view-stacked::before { content: "\f606"; } .bi-vinyl-fill::before { content: "\f607"; } .bi-vinyl::before { content: "\f608"; } .bi-voicemail::before { content: "\f609"; } .bi-volume-down-fill::before { content: "\f60a"; } .bi-volume-down::before { content: "\f60b"; } .bi-volume-mute-fill::before { content: "\f60c"; } .bi-volume-mute::before { content: "\f60d"; } .bi-volume-off-fill::before { content: "\f60e"; } .bi-volume-off::before { content: "\f60f"; } .bi-volume-up-fill::before { content: "\f610"; } .bi-volume-up::before { content: "\f611"; } .bi-vr::before { content: "\f612"; } .bi-wallet-fill::before { content: "\f613"; } .bi-wallet::before { content: "\f614"; } .bi-wallet2::before { content: "\f615"; } .bi-watch::before { content: "\f616"; } .bi-water::before { content: "\f617"; } .bi-whatsapp::before { content: "\f618"; } .bi-wifi-1::before { content: "\f619"; } .bi-wifi-2::before { content: "\f61a"; } .bi-wifi-off::before { content: "\f61b"; } .bi-wifi::before { content: "\f61c"; } .bi-wind::before { content: "\f61d"; } .bi-window-dock::before { content: "\f61e"; } .bi-window-sidebar::before { content: "\f61f"; } .bi-window::before { content: "\f620"; } .bi-wrench::before { content: "\f621"; } .bi-x-circle-fill::before { content: "\f622"; } .bi-x-circle::before { content: "\f623"; } .bi-x-diamond-fill::before { content: "\f624"; } .bi-x-diamond::before { content: "\f625"; } .bi-x-octagon-fill::before { content: "\f626"; } .bi-x-octagon::before { content: "\f627"; } .bi-x-square-fill::before { content: "\f628"; } .bi-x-square::before { content: "\f629"; } .bi-x::before { content: "\f62a"; } .bi-youtube::before { content: "\f62b"; } .bi-zoom-in::before { content: "\f62c"; } .bi-zoom-out::before { content: "\f62d"; } .bi-bank::before { content: "\f62e"; } .bi-bank2::before { content: "\f62f"; } .bi-bell-slash-fill::before { content: "\f630"; } .bi-bell-slash::before { content: "\f631"; } .bi-cash-coin::before { 
content: "\f632"; } .bi-check-lg::before { content: "\f633"; } .bi-coin::before { content: "\f634"; } .bi-currency-bitcoin::before { content: "\f635"; } .bi-currency-dollar::before { content: "\f636"; } .bi-currency-euro::before { content: "\f637"; } .bi-currency-exchange::before { content: "\f638"; } .bi-currency-pound::before { content: "\f639"; } .bi-currency-yen::before { content: "\f63a"; } .bi-dash-lg::before { content: "\f63b"; } .bi-exclamation-lg::before { content: "\f63c"; } .bi-file-earmark-pdf-fill::before { content: "\f63d"; } .bi-file-earmark-pdf::before { content: "\f63e"; } .bi-file-pdf-fill::before { content: "\f63f"; } .bi-file-pdf::before { content: "\f640"; } .bi-gender-ambiguous::before { content: "\f641"; } .bi-gender-female::before { content: "\f642"; } .bi-gender-male::before { content: "\f643"; } .bi-gender-trans::before { content: "\f644"; } .bi-headset-vr::before { content: "\f645"; } .bi-info-lg::before { content: "\f646"; } .bi-mastodon::before { content: "\f647"; } .bi-messenger::before { content: "\f648"; } .bi-piggy-bank-fill::before { content: "\f649"; } .bi-piggy-bank::before { content: "\f64a"; } .bi-pin-map-fill::before { content: "\f64b"; } .bi-pin-map::before { content: "\f64c"; } .bi-plus-lg::before { content: "\f64d"; } .bi-question-lg::before { content: "\f64e"; } .bi-recycle::before { content: "\f64f"; } .bi-reddit::before { content: "\f650"; } .bi-safe-fill::before { content: "\f651"; } .bi-safe2-fill::before { content: "\f652"; } .bi-safe2::before { content: "\f653"; } .bi-sd-card-fill::before { content: "\f654"; } .bi-sd-card::before { content: "\f655"; } .bi-skype::before { content: "\f656"; } .bi-slash-lg::before { content: "\f657"; } .bi-translate::before { content: "\f658"; } .bi-x-lg::before { content: "\f659"; } .bi-safe::before { content: "\f65a"; } .bi-apple::before { content: "\f65b"; } .bi-microsoft::before { content: "\f65d"; } .bi-windows::before { content: "\f65e"; } .bi-behance::before { content: "\f65c"; } .bi-dribbble::before { content: "\f65f"; } .bi-line::before { content: "\f660"; } .bi-medium::before { content: "\f661"; } .bi-paypal::before { content: "\f662"; } .bi-pinterest::before { content: "\f663"; } .bi-signal::before { content: "\f664"; } .bi-snapchat::before { content: "\f665"; } .bi-spotify::before { content: "\f666"; } .bi-stack-overflow::before { content: "\f667"; } .bi-strava::before { content: "\f668"; } .bi-wordpress::before { content: "\f669"; } .bi-vimeo::before { content: "\f66a"; } .bi-activity::before { content: "\f66b"; } .bi-easel2-fill::before { content: "\f66c"; } .bi-easel2::before { content: "\f66d"; } .bi-easel3-fill::before { content: "\f66e"; } .bi-easel3::before { content: "\f66f"; } .bi-fan::before { content: "\f670"; } .bi-fingerprint::before { content: "\f671"; } .bi-graph-down-arrow::before { content: "\f672"; } .bi-graph-up-arrow::before { content: "\f673"; } .bi-hypnotize::before { content: "\f674"; } .bi-magic::before { content: "\f675"; } .bi-person-rolodex::before { content: "\f676"; } .bi-person-video::before { content: "\f677"; } .bi-person-video2::before { content: "\f678"; } .bi-person-video3::before { content: "\f679"; } .bi-person-workspace::before { content: "\f67a"; } .bi-radioactive::before { content: "\f67b"; } .bi-webcam-fill::before { content: "\f67c"; } .bi-webcam::before { content: "\f67d"; } .bi-yin-yang::before { content: "\f67e"; } .bi-bandaid-fill::before { content: "\f680"; } .bi-bandaid::before { content: "\f681"; } .bi-bluetooth::before { content: "\f682"; } 
.bi-body-text::before { content: "\f683"; } .bi-boombox::before { content: "\f684"; } .bi-boxes::before { content: "\f685"; } .bi-dpad-fill::before { content: "\f686"; } .bi-dpad::before { content: "\f687"; } .bi-ear-fill::before { content: "\f688"; } .bi-ear::before { content: "\f689"; } .bi-envelope-check-1::before { content: "\f68a"; } .bi-envelope-check-fill::before { content: "\f68b"; } .bi-envelope-check::before { content: "\f68c"; } .bi-envelope-dash-1::before { content: "\f68d"; } .bi-envelope-dash-fill::before { content: "\f68e"; } .bi-envelope-dash::before { content: "\f68f"; } .bi-envelope-exclamation-1::before { content: "\f690"; } .bi-envelope-exclamation-fill::before { content: "\f691"; } .bi-envelope-exclamation::before { content: "\f692"; } .bi-envelope-plus-fill::before { content: "\f693"; } .bi-envelope-plus::before { content: "\f694"; } .bi-envelope-slash-1::before { content: "\f695"; } .bi-envelope-slash-fill::before { content: "\f696"; } .bi-envelope-slash::before { content: "\f697"; } .bi-envelope-x-1::before { content: "\f698"; } .bi-envelope-x-fill::before { content: "\f699"; } .bi-envelope-x::before { content: "\f69a"; } .bi-explicit-fill::before { content: "\f69b"; } .bi-explicit::before { content: "\f69c"; } .bi-git::before { content: "\f69d"; } .bi-infinity::before { content: "\f69e"; } .bi-list-columns-reverse::before { content: "\f69f"; } .bi-list-columns::before { content: "\f6a0"; } .bi-meta::before { content: "\f6a1"; } .bi-mortorboard-fill::before { content: "\f6a2"; } .bi-mortorboard::before { content: "\f6a3"; } .bi-nintendo-switch::before { content: "\f6a4"; } .bi-pc-display-horizontal::before { content: "\f6a5"; } .bi-pc-display::before { content: "\f6a6"; } .bi-pc-horizontal::before { content: "\f6a7"; } .bi-pc::before { content: "\f6a8"; } .bi-playstation::before { content: "\f6a9"; } .bi-plus-slash-minus::before { content: "\f6aa"; } .bi-projector-fill::before { content: "\f6ab"; } .bi-projector::before { content: "\f6ac"; } .bi-qr-code-scan::before { content: "\f6ad"; } .bi-qr-code::before { content: "\f6ae"; } .bi-quora::before { content: "\f6af"; } .bi-quote::before { content: "\f6b0"; } .bi-robot::before { content: "\f6b1"; } .bi-send-check-fill::before { content: "\f6b2"; } .bi-send-check::before { content: "\f6b3"; } .bi-send-dash-fill::before { content: "\f6b4"; } .bi-send-dash::before { content: "\f6b5"; } .bi-send-exclamation-1::before { content: "\f6b6"; } .bi-send-exclamation-fill::before { content: "\f6b7"; } .bi-send-exclamation::before { content: "\f6b8"; } .bi-send-fill::before { content: "\f6b9"; } .bi-send-plus-fill::before { content: "\f6ba"; } .bi-send-plus::before { content: "\f6bb"; } .bi-send-slash-fill::before { content: "\f6bc"; } .bi-send-slash::before { content: "\f6bd"; } .bi-send-x-fill::before { content: "\f6be"; } .bi-send-x::before { content: "\f6bf"; } .bi-send::before { content: "\f6c0"; } .bi-steam::before { content: "\f6c1"; } .bi-terminal-dash-1::before { content: "\f6c2"; } .bi-terminal-dash::before { content: "\f6c3"; } .bi-terminal-plus::before { content: "\f6c4"; } .bi-terminal-split::before { content: "\f6c5"; } .bi-ticket-detailed-fill::before { content: "\f6c6"; } .bi-ticket-detailed::before { content: "\f6c7"; } .bi-ticket-fill::before { content: "\f6c8"; } .bi-ticket-perferated-fill::before { content: "\f6c9"; } .bi-ticket-perferated::before { content: "\f6ca"; } .bi-ticket::before { content: "\f6cb"; } .bi-tiktok::before { content: "\f6cc"; } .bi-window-dash::before { content: "\f6cd"; } 
.bi-window-desktop::before { content: "\f6ce"; } .bi-window-fullscreen::before { content: "\f6cf"; } .bi-window-plus::before { content: "\f6d0"; } .bi-window-split::before { content: "\f6d1"; } .bi-window-stack::before { content: "\f6d2"; } .bi-window-x::before { content: "\f6d3"; } .bi-xbox::before { content: "\f6d4"; } .bi-ethernet::before { content: "\f6d5"; } .bi-hdmi-fill::before { content: "\f6d6"; } .bi-hdmi::before { content: "\f6d7"; } .bi-usb-c-fill::before { content: "\f6d8"; } .bi-usb-c::before { content: "\f6d9"; } .bi-usb-fill::before { content: "\f6da"; } .bi-usb-plug-fill::before { content: "\f6db"; } .bi-usb-plug::before { content: "\f6dc"; } .bi-usb-symbol::before { content: "\f6dd"; } .bi-usb::before { content: "\f6de"; } .bi-boombox-fill::before { content: "\f6df"; } .bi-displayport-1::before { content: "\f6e0"; } .bi-displayport::before { content: "\f6e1"; } .bi-gpu-card::before { content: "\f6e2"; } .bi-memory::before { content: "\f6e3"; } .bi-modem-fill::before { content: "\f6e4"; } .bi-modem::before { content: "\f6e5"; } .bi-motherboard-fill::before { content: "\f6e6"; } .bi-motherboard::before { content: "\f6e7"; } .bi-optical-audio-fill::before { content: "\f6e8"; } .bi-optical-audio::before { content: "\f6e9"; } .bi-pci-card::before { content: "\f6ea"; } .bi-router-fill::before { content: "\f6eb"; } .bi-router::before { content: "\f6ec"; } .bi-ssd-fill::before { content: "\f6ed"; } .bi-ssd::before { content: "\f6ee"; } .bi-thunderbolt-fill::before { content: "\f6ef"; } .bi-thunderbolt::before { content: "\f6f0"; } .bi-usb-drive-fill::before { content: "\f6f1"; } .bi-usb-drive::before { content: "\f6f2"; } .bi-usb-micro-fill::before { content: "\f6f3"; } .bi-usb-micro::before { content: "\f6f4"; } .bi-usb-mini-fill::before { content: "\f6f5"; } .bi-usb-mini::before { content: "\f6f6"; } .bi-cloud-haze2::before { content: "\f6f7"; } .bi-device-hdd-fill::before { content: "\f6f8"; } .bi-device-hdd::before { content: "\f6f9"; } .bi-device-ssd-fill::before { content: "\f6fa"; } .bi-device-ssd::before { content: "\f6fb"; } .bi-displayport-fill::before { content: "\f6fc"; } .bi-mortarboard-fill::before { content: "\f6fd"; } .bi-mortarboard::before { content: "\f6fe"; }
transformers.js/examples/demo-site/public/css/bootstrap-icons.css/0
{ "file_path": "transformers.js/examples/demo-site/public/css/bootstrap-icons.css", "repo_id": "transformers.js", "token_count": 29385 }
303
/////////////////////////////////////////////////////////////////
// Worker.js file for doing all transformer-based computations  //
// Needed to ensure the UI thread is not blocked when running   //
/////////////////////////////////////////////////////////////////
import { pipeline, env } from "@xenova/transformers";
env.allowLocalModels = false;

// Define task function mapping
const TASK_FUNCTION_MAPPING = {
    'translation': translate,
    'text-generation': text_generation,
    'code-completion': code_completion,
    'masked-language-modelling': masked_lm,
    'sequence-classification': sequence_classification,
    'token-classification': token_classification,
    'zero-shot-classification': zero_shot_classification,
    'question-answering': question_answering,
    'summarization': summarize,
    'automatic-speech-recognition': speech_to_text,
    'image-to-text': image_to_text,
    'image-classification': image_classification,
    'zero-shot-image-classification': zero_shot_image_classification,
    'object-detection': object_detection,
}

// Listen for messages from UI
self.addEventListener('message', async (event) => {
    const data = event.data;
    let fn = TASK_FUNCTION_MAPPING[data.task];
    if (!fn) return;

    let result = await fn(data);
    self.postMessage({
        task: data.task,
        type: 'result',
        data: result
    });
});

// Define model factories
// Ensures only one model is created of each type
class PipelineFactory {
    static task = null;
    static model = null;

    // NOTE: instance stores a promise that resolves to the pipeline
    static instance = null;

    constructor(tokenizer, model) {
        this.tokenizer = tokenizer;
        this.model = model;
    }

    /**
     * Get pipeline instance
     * @param {*} progressCallback
     * @returns {Promise}
     */
    static getInstance(progressCallback = null) {
        if (this.task === null || this.model === null) {
            throw Error("Must set task and model")
        }

        if (this.instance === null) {
            this.instance = pipeline(this.task, this.model, {
                progress_callback: progressCallback
            });
        }

        return this.instance;
    }
}

class TranslationPipelineFactory extends PipelineFactory {
    static task = 'translation';
    static model = 'Xenova/t5-small';
}

class TextGenerationPipelineFactory extends PipelineFactory {
    static task = 'text-generation';
    static model = 'Xenova/distilgpt2';
}

class CodeCompletionPipelineFactory extends PipelineFactory {
    static task = 'text-generation';
    static model = 'Xenova/codegen-350M-mono';
}

class MaskedLMPipelineFactory extends PipelineFactory {
    static task = 'fill-mask';
    static model = 'Xenova/bert-base-cased';
}

class SequenceClassificationPipelineFactory extends PipelineFactory {
    static task = 'text-classification';
    static model = 'Xenova/bert-base-multilingual-uncased-sentiment';
}

class TokenClassificationPipelineFactory extends PipelineFactory {
    static task = 'token-classification';
    static model = 'Xenova/bert-base-multilingual-cased-ner-hrl';
}

class ZeroShotClassificationPipelineFactory extends PipelineFactory {
    static task = 'zero-shot-classification';
    static model = 'Xenova/distilbert-base-uncased-mnli';
}

class QuestionAnsweringPipelineFactory extends PipelineFactory {
    static task = 'question-answering';
    static model = 'Xenova/distilbert-base-cased-distilled-squad';
}

class SummarizationPipelineFactory extends PipelineFactory {
    static task = 'summarization';
    static model = 'Xenova/distilbart-cnn-6-6';
}

class AutomaticSpeechRecognitionPipelineFactory extends PipelineFactory {
    static task = 'automatic-speech-recognition';
    static model = 'Xenova/whisper-tiny.en';
}

class ImageToTextPipelineFactory extends PipelineFactory {
    static task = 'image-to-text';
    static model = 'Xenova/vit-gpt2-image-captioning';
}

class ImageClassificationPipelineFactory extends PipelineFactory {
    static task = 'image-classification';
    static model = 'Xenova/vit-base-patch16-224';
}

class ZeroShotImageClassificationPipelineFactory extends PipelineFactory {
    static task = 'zero-shot-image-classification';
    static model = 'Xenova/clip-vit-base-patch16';
}

class ObjectDetectionPipelineFactory extends PipelineFactory {
    static task = 'object-detection';
    static model = 'Xenova/detr-resnet-50';
}

async function translate(data) {
    let pipeline = await TranslationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'translation',
            data: data
        });
    })

    // Update task based on source and target languages
    // Doing it this way prevents the same model from being loaded multiple times
    pipeline.task = `translation_${data.languageFrom}_to_${data.languageTo}`;

    return await pipeline(data.text, {
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                data: decodedText
            });
        }
    })
}

async function text_generation(data) {
    let pipeline = await TextGenerationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'text-generation',
            data: data
        });
    })

    let text = data.text.trim();

    return await pipeline(text, {
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                data: decodedText
            });
        }
    })
}

async function code_completion(data) {
    let pipeline = await CodeCompletionPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'code-completion',
            data: data,
        });
    })

    let text = data.text;

    return await pipeline(text, {
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                targetType: data.targetType,
                data: decodedText
            });
        }
    })
}

async function masked_lm(data) {
    let pipeline = await MaskedLMPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'masked-language-modelling',
            data: data
        });
    })

    let output = await pipeline(data.text, data.generation)

    self.postMessage({
        type: 'update',
        target: data.elementIdToUpdate,
        data: output.map(x => x.sequence).join('\n')
    });

    return output;
}

async function sequence_classification(data) {
    let pipeline = await SequenceClassificationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'sequence-classification',
            data: data
        });
    });

    let outputs = await pipeline(data.text, {
        topk: 5 // return all
    })

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        data: outputs
    });
}

async function token_classification(data) {
    let pipeline = await TokenClassificationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'token-classification',
            data: data
        });
    });

    let outputs = await pipeline(data.text, {
        ignore_labels: [] // Return all labels
    });

    let chunks = [];
    let currentChunk = { type: '', text: [] };

    for (let i = 0; i < outputs.length; i++) {
        let word = pipeline.tokenizer.model.tokens_to_ids.get(outputs[i].word);
        let entity = outputs[i].entity;

        if (entity.startsWith('B-')) { // beginning of a new chunk
            if (currentChunk.text.length > 0) { // push the current chunk if it exists
                chunks.push(currentChunk);
                currentChunk = { type: '', text: [] };
            }
            currentChunk.type = entity.slice(2); // get the type of the chunk
            currentChunk.text = [word];
        } else if (entity.startsWith('I-')) { // continuation of a chunk
            currentChunk.text.push(word);
        } else { // not part of a chunk (O tag)
            if (currentChunk.text.length > 0) { // push the current chunk if it exists
                if (currentChunk.type === 'O') {
                    currentChunk.text.push(word);
                } else {
                    chunks.push(currentChunk);
                    currentChunk = { type: 'O', text: [word] };
                }
            } else {
                currentChunk = { type: 'O', text: [word] };
            }
        }
    }

    // push the last chunk if it exists
    if (currentChunk.text.length > 0) {
        chunks.push(currentChunk);
    }

    let postProcessedChunks = chunks.map(
        x => ({ type: x.type, text: pipeline.tokenizer.decode(x.text) })
    )

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        data: postProcessedChunks,
    });
}

async function zero_shot_classification(data) {
    let pipeline = await ZeroShotClassificationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'zero-shot-classification',
            data: data
        });
    });

    let outputs = await pipeline(data.text, data.classes, data.generation);

    let formattedOutputs = outputs.labels.map((x, i) => {
        return {
            label: x,
            score: outputs.scores[i],
        }
    });

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        data: formattedOutputs
    });
}

async function question_answering(data) {
    let pipeline = await QuestionAnsweringPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'question-answering',
            data: data
        });
    })

    let answer = await pipeline(data.question, data.context)

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        data: answer.answer
    });

    return answer;
}

async function summarize(data) {
    let pipeline = await SummarizationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'summarization',
            data: data
        });
    })

    return await pipeline(data.text, {
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                data: decodedText.trim()
            });
        }
    })
}

async function speech_to_text(data) {
    let pipeline = await AutomaticSpeechRecognitionPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'automatic-speech-recognition',
            data: data
        });
    })

    return await pipeline(data.audio, {
        // Choose good defaults for the demo
        chunk_length_s: 30,
        stride_length_s: 5,
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                data: decodedText.trim()
            });
        }
    })
}

async function image_to_text(data) {
    let pipeline = await ImageToTextPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'image-to-text',
            data: data
        });
    })

    return await pipeline(data.image, {
        ...data.generation,
        callback_function: function (beams) {
            const decodedText = pipeline.tokenizer.decode(beams[0].output_token_ids, {
                skip_special_tokens: true,
            })

            self.postMessage({
                type: 'update',
                target: data.elementIdToUpdate,
                data: decodedText.trim()
            });
        }
    })
}

async function image_classification(data) {
    let pipeline = await ImageClassificationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'image-classification',
            data: data
        });
    })

    let outputs = await pipeline(data.image, {
        topk: 5 // return all
    })

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        updateLabels: data.updateLabels,
        data: outputs
    });
}

async function zero_shot_image_classification(data) {
    let pipeline = await ZeroShotImageClassificationPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'image-classification',
            data: data
        });
    })

    let outputs = await pipeline(data.image, data.classes)

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        updateLabels: data.updateLabels,
        data: outputs
    });
}

async function object_detection(data) {
    let pipeline = await ObjectDetectionPipelineFactory.getInstance(data => {
        self.postMessage({
            type: 'download',
            task: 'object-detection',
            data: data
        });
    })

    let outputs = await pipeline(data.image, {
        threshold: 0.9,
        percentage: true
    })

    self.postMessage({
        type: 'complete',
        target: data.elementIdToUpdate,
        targetType: data.targetType,
        chartId: data.chartId,
        data: outputs
    });
}
transformers.js/examples/demo-site/src/worker.js/0
{ "file_path": "transformers.js/examples/demo-site/src/worker.js", "repo_id": "transformers.js", "token_count": 6373 }
304
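The worker above expects the UI thread to post messages of the shape `{ task, ... }` and replies with `download`, `update`, `complete`, and `result` messages. A minimal main-thread counterpart might look like the sketch below; the file name `main.js` and any payload fields beyond `task`, `text`, `elementIdToUpdate`, and `generation` are assumptions for illustration, not the demo site's actual UI code.

```js
// main.js — hypothetical UI-side sketch (not part of the dataset record above).
// It relies only on the message shapes visible in worker.js.
const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' });

// Send a task to the worker; fields other than `task` depend on the task function.
worker.postMessage({
    task: 'summarization',
    text: 'Some long article text…',
    elementIdToUpdate: 'summary-output',
    generation: { max_new_tokens: 100 }, // assumed generation options
});

// Handle the worker's progress and result messages.
worker.addEventListener('message', (event) => {
    const message = event.data;
    switch (message.type) {
        case 'download': // model file download progress
            console.log('loading:', message.data);
            break;
        case 'update':   // partial generation output
            document.getElementById(message.target).textContent = message.data;
            break;
        case 'complete': // task-specific structured output
        case 'result':   // final return value of the task function
            console.log(message.data);
            break;
    }
});
```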
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <title>Transformers.js | Sample Electron application</title>

    <!-- Load styles -->
    <link rel="stylesheet" href="index.css" />

    <!-- Load the client script -->
    <script src="./client.js" defer></script>
  </head>
  <body>
    <div class="container">
      <h1>Transformers.js</h1>
      <h2>Run 🤗 Transformers in Electron!</h2>

      <input id="text" placeholder="Enter text here">
      <pre id="output"></pre>
    </div>
  </body>
</html>
transformers.js/examples/electron/src/index.html/0
{ "file_path": "transformers.js/examples/electron/src/index.html", "repo_id": "transformers.js", "token_count": 220 }
305
/** @type {import('next').NextConfig} */
const nextConfig = {
    // (Optional) Export as a standalone site
    // See https://nextjs.org/docs/pages/api-reference/next-config-js/output#automatically-copying-traced-files
    output: 'standalone', // Feel free to modify/remove this option

    // Indicate that these packages should not be bundled by webpack
    experimental: {
        serverComponentsExternalPackages: ['sharp', 'onnxruntime-node'],
    },
};

module.exports = nextConfig;
transformers.js/examples/next-server/next.config.js/0
{ "file_path": "transformers.js/examples/next-server/next.config.js", "repo_id": "transformers.js", "token_count": 164 }
306
{ "name": "commonjs", "version": "1.0.0", "description": "Server-side inference with Transformers.js (CommonJS)", "main": "app.js", "keywords": [], "author": "Xenova", "license": "ISC", "dependencies": { "@xenova/transformers": "^2.0.0" } }
transformers.js/examples/node/commonjs/package.json/0
{ "file_path": "transformers.js/examples/node/commonjs/package.json", "repo_id": "transformers.js", "token_count": 109 }
307
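The record above only declares the dependency and entry point; the referenced `app.js` is not included here. Because `@xenova/transformers` is distributed as an ES module, a CommonJS entry point generally has to load it with a dynamic `import()` rather than `require()`. The sketch below is an illustrative guess at such an entry point, with a model name chosen purely for demonstration, not the example's actual `app.js`.

```js
// app.js — hypothetical CommonJS entry point (illustrative only).
// `require()` cannot load an ES module, so use a dynamic import() instead.
async function main() {
    const { pipeline } = await import('@xenova/transformers');

    // Allocate a text-classification pipeline (model name assumed for illustration).
    const classifier = await pipeline(
        'text-classification',
        'Xenova/distilbert-base-uncased-finetuned-sst-2-english',
    );

    const output = await classifier('I love Transformers.js!');
    console.log(output); // e.g. [{ label: 'POSITIVE', score: 0.99… }]
}

main();
```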
{ "name": "semantic-audio-search", "version": "0.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "semantic-audio-search", "version": "0.0.0", "dependencies": { "@xenova/transformers": "^2.10.1", "deepscatter": "github:nomic-ai/deepscatter" }, "devDependencies": { "vite": "^5.0.13" } }, "node_modules/@75lb/deep-merge": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@75lb/deep-merge/-/deep-merge-1.1.1.tgz", "integrity": "sha512-xvgv6pkMGBA6GwdyJbNAnDmfAIR/DfWhrj9jgWh3TY7gRm3KO46x/GPjRg6wJ0nOepwqrNxFfojebh0Df4h4Tw==", "peer": true, "dependencies": { "lodash.assignwith": "^4.2.0", "typical": "^7.1.1" }, "engines": { "node": ">=12.17" } }, "node_modules/@75lb/deep-merge/node_modules/typical": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/typical/-/typical-7.1.1.tgz", "integrity": "sha512-T+tKVNs6Wu7IWiAce5BgMd7OZfNYUndHwc5MknN+UHOudi7sGZzuHdCadllRuqJ3fPtgFtIH9+lt9qRv6lmpfA==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/@esbuild/android-arm": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.8.tgz", "integrity": "sha512-31E2lxlGM1KEfivQl8Yf5aYU/mflz9g06H6S15ITUFQueMFtFjESRMoDSkvMo8thYvLBax+VKTPlpnx+sPicOA==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/android-arm64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.8.tgz", "integrity": "sha512-B8JbS61bEunhfx8kasogFENgQfr/dIp+ggYXwTqdbMAgGDhRa3AaPpQMuQU0rNxDLECj6FhDzk1cF9WHMVwrtA==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/android-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.8.tgz", "integrity": "sha512-rdqqYfRIn4jWOp+lzQttYMa2Xar3OK9Yt2fhOhzFXqg0rVWEfSclJvZq5fZslnz6ypHvVf3CT7qyf0A5pM682A==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/darwin-arm64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.8.tgz", "integrity": "sha512-RQw9DemMbIq35Bprbboyf8SmOr4UXsRVxJ97LgB55VKKeJOOdvsIPy0nFyF2l8U+h4PtBx/1kRf0BelOYCiQcw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "darwin" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/darwin-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.8.tgz", "integrity": "sha512-3sur80OT9YdeZwIVgERAysAbwncom7b4bCI2XKLjMfPymTud7e/oY4y+ci1XVp5TfQp/bppn7xLw1n/oSQY3/Q==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "darwin" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/freebsd-arm64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.8.tgz", "integrity": "sha512-WAnPJSDattvS/XtPCTj1tPoTxERjcTpH6HsMr6ujTT+X6rylVe8ggxk8pVxzf5U1wh5sPODpawNicF5ta/9Tmw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "freebsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/freebsd-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.8.tgz", "integrity": "sha512-ICvZyOplIjmmhjd6mxi+zxSdpPTKFfyPPQMQTK/w+8eNK6WV01AjIztJALDtwNNfFhfZLux0tZLC+U9nSyA5Zg==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "freebsd" ], "engines": { "node": ">=12" } }, 
"node_modules/@esbuild/linux-arm": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.8.tgz", "integrity": "sha512-H4vmI5PYqSvosPaTJuEppU9oz1dq2A7Mr2vyg5TF9Ga+3+MGgBdGzcyBP7qK9MrwFQZlvNyJrvz6GuCaj3OukQ==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-arm64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.8.tgz", "integrity": "sha512-z1zMZivxDLHWnyGOctT9JP70h0beY54xDDDJt4VpTX+iwA77IFsE1vCXWmprajJGa+ZYSqkSbRQ4eyLCpCmiCQ==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-ia32": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.8.tgz", "integrity": "sha512-1a8suQiFJmZz1khm/rDglOc8lavtzEMRo0v6WhPgxkrjcU0LkHj+TwBrALwoz/OtMExvsqbbMI0ChyelKabSvQ==", "cpu": [ "ia32" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-loong64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.8.tgz", "integrity": "sha512-fHZWS2JJxnXt1uYJsDv9+b60WCc2RlvVAy1F76qOLtXRO+H4mjt3Tr6MJ5l7Q78X8KgCFudnTuiQRBhULUyBKQ==", "cpu": [ "loong64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-mips64el": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.8.tgz", "integrity": "sha512-Wy/z0EL5qZYLX66dVnEg9riiwls5IYnziwuju2oUiuxVc+/edvqXa04qNtbrs0Ukatg5HEzqT94Zs7J207dN5Q==", "cpu": [ "mips64el" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-ppc64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.8.tgz", "integrity": "sha512-ETaW6245wK23YIEufhMQ3HSeHO7NgsLx8gygBVldRHKhOlD1oNeNy/P67mIh1zPn2Hr2HLieQrt6tWrVwuqrxg==", "cpu": [ "ppc64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-riscv64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.8.tgz", "integrity": "sha512-T2DRQk55SgoleTP+DtPlMrxi/5r9AeFgkhkZ/B0ap99zmxtxdOixOMI570VjdRCs9pE4Wdkz7JYrsPvsl7eESg==", "cpu": [ "riscv64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-s390x": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.8.tgz", "integrity": "sha512-NPxbdmmo3Bk7mbNeHmcCd7R7fptJaczPYBaELk6NcXxy7HLNyWwCyDJ/Xx+/YcNH7Im5dHdx9gZ5xIwyliQCbg==", "cpu": [ "s390x" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.8.tgz", "integrity": "sha512-lytMAVOM3b1gPypL2TRmZ5rnXl7+6IIk8uB3eLsV1JwcizuolblXRrc5ShPrO9ls/b+RTp+E6gbsuLWHWi2zGg==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/netbsd-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.8.tgz", "integrity": "sha512-hvWVo2VsXz/8NVt1UhLzxwAfo5sioj92uo0bCfLibB0xlOmimU/DeAEsQILlBQvkhrGjamP0/el5HU76HAitGw==", "cpu": [ "x64" ], 
"dev": true, "optional": true, "os": [ "netbsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/openbsd-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.8.tgz", "integrity": "sha512-/7Y7u77rdvmGTxR83PgaSvSBJCC2L3Kb1M/+dmSIvRvQPXXCuC97QAwMugBNG0yGcbEGfFBH7ojPzAOxfGNkwQ==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "openbsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/sunos-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.8.tgz", "integrity": "sha512-9Lc4s7Oi98GqFA4HzA/W2JHIYfnXbUYgekUP/Sm4BG9sfLjyv6GKKHKKVs83SMicBF2JwAX6A1PuOLMqpD001w==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "sunos" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-arm64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.8.tgz", "integrity": "sha512-rq6WzBGjSzihI9deW3fC2Gqiak68+b7qo5/3kmB6Gvbh/NYPA0sJhrnp7wgV4bNwjqM+R2AApXGxMO7ZoGhIJg==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-ia32": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.8.tgz", "integrity": "sha512-AIAbverbg5jMvJznYiGhrd3sumfwWs8572mIJL5NQjJa06P8KfCPWZQ0NwZbPQnbQi9OWSZhFVSUWjjIrn4hSw==", "cpu": [ "ia32" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-x64": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.8.tgz", "integrity": "sha512-bfZ0cQ1uZs2PqpulNL5j/3w+GDhP36k1K5c38QdQg+Swy51jFZWWeIkteNsufkQxp986wnqRRsb/bHbY1WQ7TA==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" }, "node_modules/@protobufjs/base64": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" }, "node_modules/@protobufjs/codegen": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" }, "node_modules/@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" }, "node_modules/@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" } }, "node_modules/@protobufjs/float": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" }, "node_modules/@protobufjs/inquire": { "version": "1.1.0", 
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" }, "node_modules/@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" }, "node_modules/@protobufjs/pool": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" }, "node_modules/@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.6.1.tgz", "integrity": "sha512-0WQ0ouLejaUCRsL93GD4uft3rOmB8qoQMU05Kb8CmMtMBe7XUDLAltxVZI1q6byNqEtU7N1ZX1Vw5lIpgulLQA==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "android" ] }, "node_modules/@rollup/rollup-android-arm64": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.6.1.tgz", "integrity": "sha512-1TKm25Rn20vr5aTGGZqo6E4mzPicCUD79k17EgTLAsXc1zysyi4xXKACfUbwyANEPAEIxkzwue6JZ+stYzWUTA==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "android" ] }, "node_modules/@rollup/rollup-darwin-arm64": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.6.1.tgz", "integrity": "sha512-cEXJQY/ZqMACb+nxzDeX9IPLAg7S94xouJJCNVE5BJM8JUEP4HeTF+ti3cmxWeSJo+5D+o8Tc0UAWUkfENdeyw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "darwin" ] }, "node_modules/@rollup/rollup-darwin-x64": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.6.1.tgz", "integrity": "sha512-LoSU9Xu56isrkV2jLldcKspJ7sSXmZWkAxg7sW/RfF7GS4F5/v4EiqKSMCFbZtDu2Nc1gxxFdQdKwkKS4rwxNg==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "darwin" ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.6.1.tgz", "integrity": "sha512-EfI3hzYAy5vFNDqpXsNxXcgRDcFHUWSx5nnRSCKwXuQlI5J9dD84g2Usw81n3FLBNsGCegKGwwTVsSKK9cooSQ==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.6.1.tgz", "integrity": "sha512-9lhc4UZstsegbNLhH0Zu6TqvDfmhGzuCWtcTFXY10VjLLUe4Mr0Ye2L3rrtHaDd/J5+tFMEuo5LTCSCMXWfUKw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.6.1.tgz", "integrity": "sha512-FfoOK1yP5ksX3wwZ4Zk1NgyGHZyuRhf99j64I5oEmirV8EFT7+OhUZEnP+x17lcP/QHJNWGsoJwrz4PJ9fBEXw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { "version": "4.6.1", "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.6.1.tgz", "integrity": "sha512-DNGZvZDO5YF7jN5fX8ZqmGLjZEXIJRdJEdTFMhiyXqyXubBa0WVLDWSNlQ5JR2PNgDbEV1VQowhVRUh+74D+RA==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-linux-x64-musl": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.6.1.tgz", "integrity": "sha512-RkJVNVRM+piYy87HrKmhbexCHg3A6Z6MU0W9GHnJwBQNBeyhCJG9KDce4SAMdicQnpURggSvtbGo9xAWOfSvIQ==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "linux" ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.6.1.tgz", "integrity": "sha512-v2FVT6xfnnmTe3W9bJXl6r5KwJglMK/iRlkKiIFfO6ysKs0rDgz7Cwwf3tjldxQUrHL9INT/1r4VA0n9L/F1vQ==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "win32" ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.6.1.tgz", "integrity": "sha512-YEeOjxRyEjqcWphH9dyLbzgkF8wZSKAKUkldRY6dgNR5oKs2LZazqGB41cWJ4Iqqcy9/zqYgmzBkRoVz3Q9MLw==", "cpu": [ "ia32" ], "dev": true, "optional": true, "os": [ "win32" ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.6.1.tgz", "integrity": "sha512-0zfTlFAIhgz8V2G8STq8toAjsYYA6eci1hnXuyOTUFnymrtJwnS6uGKiv3v5UrPZkBlamLvrLV2iiaeqCKzb0A==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "win32" ] }, "node_modules/@types/command-line-args": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/@types/command-line-args/-/command-line-args-5.2.0.tgz", "integrity": "sha512-UuKzKpJJ/Ief6ufIaIzr3A/0XnluX7RvFgwkV89Yzvm77wCh1kFaFmqN8XEnGcN62EuHdedQjEMb8mYxFLGPyA==", "peer": true }, "node_modules/@types/command-line-usage": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/@types/command-line-usage/-/command-line-usage-5.0.2.tgz", "integrity": "sha512-n7RlEEJ+4x4TS7ZQddTmNSxP+zziEG0TNsMfiRIxcIVXt71ENJ9ojeXmGO3wPoTdn7pJcU2xc3CJYMktNT6DPg==", "peer": true }, "node_modules/@types/long": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==" }, "node_modules/@types/node": { "version": "20.3.0", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.0.tgz", "integrity": "sha512-cumHmIAf6On83X7yP+LrsEyUOf/YlociZelmpRYaGFydoaPdxdt80MAbu6vWerQT2COCp2nPvHdsbD7tHn/YlQ==" }, "node_modules/@types/pad-left": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/@types/pad-left/-/pad-left-2.1.1.tgz", "integrity": "sha512-Xd22WCRBydkGSApl5Bw0PhAOHKSVjNL3E3AwzKaps96IMraPqy5BvZIsBVK6JLwdybUzjHnuWVwpDd0JjTfHXA==", "peer": true }, "node_modules/@types/tape": { "version": "4.13.4", "resolved": "https://registry.npmjs.org/@types/tape/-/tape-4.13.4.tgz", "integrity": "sha512-0Mw8/FAMheD2MvyaFYDaAix7X5GfNjl/XI+zvqJdzC6N05BmHKz6Hwn+r7+8PEXDEKrC3V/irC9z7mrl5a130g==", "dependencies": { "@types/node": "*", "@types/through": "*" } }, "node_modules/@types/through": { "version": "0.0.33", "resolved": "https://registry.npmjs.org/@types/through/-/through-0.0.33.tgz", "integrity": 
"sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ==", "dependencies": { "@types/node": "*" } }, "node_modules/@xenova/transformers": { "version": "2.10.1", "resolved": "https://registry.npmjs.org/@xenova/transformers/-/transformers-2.10.1.tgz", "integrity": "sha512-vUCU3rKEcuH0dSuJzXuESbUPziqh8FvjLVikUh//5SWGcwfdccIfTUhDsS1obbrUTW3ve0CHeJvHnHbk0QTzEA==", "dependencies": { "onnxruntime-web": "1.14.0", "sharp": "^0.32.0" }, "optionalDependencies": { "onnxruntime-node": "1.14.0" } }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "peer": true, "dependencies": { "color-convert": "^2.0.1" }, "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, "node_modules/apache-arrow": { "version": "13.0.0", "resolved": "https://registry.npmjs.org/apache-arrow/-/apache-arrow-13.0.0.tgz", "integrity": "sha512-3gvCX0GDawWz6KFNC28p65U+zGh/LZ6ZNKWNu74N6CQlKzxeoWHpi4CgEQsgRSEMuyrIIXi1Ea2syja7dwcHvw==", "peer": true, "dependencies": { "@types/command-line-args": "5.2.0", "@types/command-line-usage": "5.0.2", "@types/node": "20.3.0", "@types/pad-left": "2.1.1", "command-line-args": "5.2.1", "command-line-usage": "7.0.1", "flatbuffers": "23.5.26", "json-bignum": "^0.0.3", "pad-left": "^2.1.0", "tslib": "^2.5.3" }, "bin": { "arrow2csv": "bin/arrow2csv.js" } }, "node_modules/array-back": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/array-back/-/array-back-3.1.0.tgz", "integrity": "sha512-TkuxA4UCOvxuDK6NZYXCalszEzj+TLszyASooky+i742l9TqsOdYCMJJupxRic61hwquNtppB3hgcuq9SVSH1Q==", "peer": true, "engines": { "node": ">=6" } }, "node_modules/b4a": { "version": "1.6.4", "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.4.tgz", "integrity": "sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw==" }, "node_modules/base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "node_modules/buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "peer": true, "dependencies": { 
"ansi-styles": "^4.1.0", "supports-color": "^7.1.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/chalk/chalk?sponsor=1" } }, "node_modules/chalk-template": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/chalk-template/-/chalk-template-0.4.0.tgz", "integrity": "sha512-/ghrgmhfY8RaSdeo43hNXxpoHAtxdbskUHjPpfqUWGttFgycUhYPGx3YZBCnUCvOa7Doivn1IZec3DEGFoMgLg==", "peer": true, "dependencies": { "chalk": "^4.1.2" }, "engines": { "node": ">=12" }, "funding": { "url": "https://github.com/chalk/chalk-template?sponsor=1" } }, "node_modules/chownr": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, "node_modules/color": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", "dependencies": { "color-convert": "^2.0.1", "color-string": "^1.9.0" }, "engines": { "node": ">=12.5.0" } }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dependencies": { "color-name": "~1.1.4" }, "engines": { "node": ">=7.0.0" } }, "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/color-string": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" } }, "node_modules/command-line-args": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.2.1.tgz", "integrity": "sha512-H4UfQhZyakIjC74I9d34fGYDwk3XpSr17QhEd0Q3I9Xq1CETHo4Hcuo87WyWHpAF1aSLjLRf5lD9ZGX2qStUvg==", "peer": true, "dependencies": { "array-back": "^3.1.0", "find-replace": "^3.0.0", "lodash.camelcase": "^4.3.0", "typical": "^4.0.0" }, "engines": { "node": ">=4.0.0" } }, "node_modules/command-line-usage": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-7.0.1.tgz", "integrity": "sha512-NCyznE//MuTjwi3y84QVUGEOT+P5oto1e1Pk/jFPVdPPfsG03qpTIl3yw6etR+v73d0lXsoojRpvbru2sqePxQ==", "peer": true, "dependencies": { "array-back": "^6.2.2", "chalk-template": "^0.4.0", "table-layout": "^3.0.0", "typical": "^7.1.1" }, "engines": { "node": ">=12.20.0" } }, "node_modules/command-line-usage/node_modules/array-back": { "version": "6.2.2", "resolved": "https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz", "integrity": "sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/command-line-usage/node_modules/typical": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/typical/-/typical-7.1.1.tgz", "integrity": "sha512-T+tKVNs6Wu7IWiAce5BgMd7OZfNYUndHwc5MknN+UHOudi7sGZzuHdCadllRuqJ3fPtgFtIH9+lt9qRv6lmpfA==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/commander": { "version": "7.2.0", "resolved": 
"https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", "engines": { "node": ">= 10" } }, "node_modules/d3-array": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", "dependencies": { "internmap": "1 - 2" }, "engines": { "node": ">=12" } }, "node_modules/d3-color": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", "engines": { "node": ">=12" } }, "node_modules/d3-contour": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", "dependencies": { "d3-array": "^3.2.0" }, "engines": { "node": ">=12" } }, "node_modules/d3-dispatch": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", "engines": { "node": ">=12" } }, "node_modules/d3-drag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", "dependencies": { "d3-dispatch": "1 - 3", "d3-selection": "3" }, "engines": { "node": ">=12" } }, "node_modules/d3-dsv": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", "dependencies": { "commander": "7", "iconv-lite": "0.6", "rw": "1" }, "bin": { "csv2json": "bin/dsv2json.js", "csv2tsv": "bin/dsv2dsv.js", "dsv2dsv": "bin/dsv2dsv.js", "dsv2json": "bin/dsv2json.js", "json2csv": "bin/json2dsv.js", "json2dsv": "bin/json2dsv.js", "json2tsv": "bin/json2dsv.js", "tsv2csv": "bin/dsv2dsv.js", "tsv2json": "bin/dsv2json.js" }, "engines": { "node": ">=12" } }, "node_modules/d3-ease": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", "engines": { "node": ">=12" } }, "node_modules/d3-fetch": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", "dependencies": { "d3-dsv": "1 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-format": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", "engines": { "node": ">=12" } }, "node_modules/d3-geo": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.0.tgz", "integrity": "sha512-JEo5HxXDdDYXCaWdwLRt79y7giK8SbhZJbFWXqbRTolCHFI5jRqteLzCsq51NKbUoX0PjBVSohxrx+NoOUujYA==", "dependencies": { "d3-array": "2.5.0 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-interpolate": { "version": "3.0.1", "resolved": 
"https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", "dependencies": { "d3-color": "1 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-random": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", "engines": { "node": ">=12" } }, "node_modules/d3-scale": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", "dependencies": { "d3-array": "2.10.0 - 3", "d3-format": "1 - 3", "d3-interpolate": "1.2.0 - 3", "d3-time": "2.1.1 - 3", "d3-time-format": "2 - 4" }, "engines": { "node": ">=12" } }, "node_modules/d3-scale-chromatic": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz", "integrity": "sha512-Lx9thtxAKrO2Pq6OO2Ua474opeziKr279P/TKZsMAhYyNDD3EnCffdbgeSYN5O7m2ByQsxtuP2CSDczNUIZ22g==", "dependencies": { "d3-color": "1 - 3", "d3-interpolate": "1 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-selection": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "engines": { "node": ">=12" } }, "node_modules/d3-time": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", "dependencies": { "d3-array": "2 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-time-format": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", "dependencies": { "d3-time": "1 - 3" }, "engines": { "node": ">=12" } }, "node_modules/d3-timer": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", "engines": { "node": ">=12" } }, "node_modules/d3-transition": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", "dependencies": { "d3-color": "1 - 3", "d3-dispatch": "1 - 3", "d3-ease": "1 - 3", "d3-interpolate": "1 - 3", "d3-timer": "1 - 3" }, "engines": { "node": ">=12" }, "peerDependencies": { "d3-selection": "2 - 3" } }, "node_modules/d3-zoom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "2 - 3", "d3-transition": "2 - 3" }, "engines": { "node": ">=12" } }, "node_modules/decompress-response": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", "integrity": 
"sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", "dependencies": { "mimic-response": "^3.1.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/deep-extend": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", "engines": { "node": ">=4.0.0" } }, "node_modules/deepscatter": { "version": "2.15.0-RC-1", "resolved": "git+ssh://git@github.com/nomic-ai/deepscatter.git#ae66a2022f3a110f6b48815ac17610a9e21602d4", "license": "CC BY-NC-SA 4.0", "dependencies": { "d3-array": "^3.2.2", "d3-color": "^3.1.0", "d3-contour": "^4.0.2", "d3-ease": "^3.0.1", "d3-fetch": "^3.0.1", "d3-format": "^3.1.0", "d3-geo": "^3.1.0", "d3-interpolate": "^3.0.1", "d3-random": "^3.0.1", "d3-scale": "^4.0.2", "d3-scale-chromatic": "^3.0.0", "d3-selection": "^3.0.0", "d3-timer": "^3.0.1", "d3-transition": "^3.0.1", "d3-zoom": "^3.0.0", "deepscatter": "^2.14.0", "glsl-easings": "^1.0.0", "glsl-fast-gaussian-blur": "^1.0.2", "glsl-read-float": "^1.1.0", "lodash.merge": "^4.6.2", "rbush-3d": "^0.0.4", "regl": "^2.1.0" }, "peerDependencies": { "apache-arrow": "13.0.0" } }, "node_modules/deepscatter/node_modules/deepscatter": { "version": "2.14.1", "resolved": "https://registry.npmjs.org/deepscatter/-/deepscatter-2.14.1.tgz", "integrity": "sha512-VKg22WR1l2jWpKdpwV1IcFzvkbQrOJHfNV8Q4AalZ144a+wiQsIeqRvheYYA48NXO1wJFvEiMhRwHHkm1HBxjQ==", "dependencies": { "d3-array": "^3.2.2", "d3-color": "^3.1.0", "d3-contour": "^4.0.2", "d3-ease": "^3.0.1", "d3-fetch": "^3.0.1", "d3-format": "^3.1.0", "d3-geo": "^3.1.0", "d3-interpolate": "^3.0.1", "d3-random": "^3.0.1", "d3-scale": "^4.0.2", "d3-scale-chromatic": "^3.0.0", "d3-selection": "^3.0.0", "d3-timer": "^3.0.1", "d3-transition": "^3.0.1", "d3-zoom": "^3.0.0", "deepscatter": "^2.14.0", "glsl-easings": "^1.0.0", "glsl-fast-gaussian-blur": "^1.0.2", "glsl-read-float": "^1.1.0", "lodash.merge": "^4.6.2", "rbush-3d": "^0.0.4", "regl": "^2.1.0" }, "peerDependencies": { "apache-arrow": "13.0.0" } }, "node_modules/detect-libc": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", "engines": { "node": ">=8" } }, "node_modules/end-of-stream": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", "dependencies": { "once": "^1.4.0" } }, "node_modules/esbuild": { "version": "0.19.8", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.8.tgz", "integrity": "sha512-l7iffQpT2OrZfH2rXIp7/FkmaeZM0vxbxN9KfiCwGYuZqzMg/JdvX26R31Zxn/Pxvsrg3Y9N6XTcnknqDyyv4w==", "dev": true, "hasInstallScript": true, "bin": { "esbuild": "bin/esbuild" }, "engines": { "node": ">=12" }, "optionalDependencies": { "@esbuild/android-arm": "0.19.8", "@esbuild/android-arm64": "0.19.8", "@esbuild/android-x64": "0.19.8", "@esbuild/darwin-arm64": "0.19.8", "@esbuild/darwin-x64": "0.19.8", "@esbuild/freebsd-arm64": "0.19.8", "@esbuild/freebsd-x64": "0.19.8", "@esbuild/linux-arm": "0.19.8", "@esbuild/linux-arm64": "0.19.8", "@esbuild/linux-ia32": "0.19.8", "@esbuild/linux-loong64": "0.19.8", "@esbuild/linux-mips64el": "0.19.8", 
"@esbuild/linux-ppc64": "0.19.8", "@esbuild/linux-riscv64": "0.19.8", "@esbuild/linux-s390x": "0.19.8", "@esbuild/linux-x64": "0.19.8", "@esbuild/netbsd-x64": "0.19.8", "@esbuild/openbsd-x64": "0.19.8", "@esbuild/sunos-x64": "0.19.8", "@esbuild/win32-arm64": "0.19.8", "@esbuild/win32-ia32": "0.19.8", "@esbuild/win32-x64": "0.19.8" } }, "node_modules/expand-template": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", "engines": { "node": ">=6" } }, "node_modules/fast-fifo": { "version": "1.3.2", "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==" }, "node_modules/find-replace": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/find-replace/-/find-replace-3.0.0.tgz", "integrity": "sha512-6Tb2myMioCAgv5kfvP5/PkZZ/ntTpVK39fHY7WkWBgvbeE+VHd/tZuZ4mrC+bxh4cfOZeYKVPaJIZtZXV7GNCQ==", "peer": true, "dependencies": { "array-back": "^3.0.1" }, "engines": { "node": ">=4.0.0" } }, "node_modules/flatbuffers": { "version": "23.5.26", "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-23.5.26.tgz", "integrity": "sha512-vE+SI9vrJDwi1oETtTIFldC/o9GsVKRM+s6EL0nQgxXlYV1Vc4Tk30hj4xGICftInKQKj1F3up2n8UbIVobISQ==", "peer": true }, "node_modules/fs-constants": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, "optional": true, "os": [ "darwin" ], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, "node_modules/github-from-package": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==" }, "node_modules/glsl-easings": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/glsl-easings/-/glsl-easings-1.0.0.tgz", "integrity": "sha512-blfWMqztNRVI4sJNyPlA9uV4T7xIm//Ie6fGGtnQPs3FpCj5YOWt/QIVFRMWpsBQhehRRGoaKG2yDw6tkAlmeA==" }, "node_modules/glsl-fast-gaussian-blur": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/glsl-fast-gaussian-blur/-/glsl-fast-gaussian-blur-1.0.2.tgz", "integrity": "sha512-+jkGGhe+3twjyxnmvP3uxVnAUqLdW0tXvvKLCwM/wD7jV5TMEk1w4ZlPIVRt7hexk9jkFEsWGkr9H+1b/aJYrw==" }, "node_modules/glsl-read-float": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/glsl-read-float/-/glsl-read-float-1.1.0.tgz", "integrity": "sha512-cE0rmtZjheE49V8BMlPU6Y9c7RT7vsrUGXTXhWnN8CTu3xP8+l/IlQMi3+3GJU839g34vrwbIHfhLupQsR1n8Q==" }, "node_modules/guid-typescript": { "version": "1.0.9", "resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz", "integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ==" }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "peer": true, "engines": { "node": ">=8" } }, "node_modules/iconv-lite": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/ieee754": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "node_modules/ini": { "version": "1.3.8", "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" }, "node_modules/internmap": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", "engines": { "node": ">=12" } }, "node_modules/is-arrayish": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" }, "node_modules/json-bignum": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/json-bignum/-/json-bignum-0.0.3.tgz", "integrity": "sha512-2WHyXj3OfHSgNyuzDbSxI1w2jgw5gkWSWhS7Qg4bWXx1nLk3jnbwfUeS0PSba3IzpTUWdHxBieELUzXRjQB2zg==", "peer": true, "engines": { "node": ">=0.8" } }, "node_modules/lodash.assignwith": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/lodash.assignwith/-/lodash.assignwith-4.2.0.tgz", "integrity": "sha512-ZznplvbvtjK2gMvnQ1BR/zqPFZmS6jbK4p+6Up4xcRYA7yMIwxHCfbTcrYxXKzzqLsQ05eJPVznEW3tuwV7k1g==", "peer": true }, "node_modules/lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", "peer": true }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" }, "node_modules/long": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, "node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", "dependencies": { "yallist": "^4.0.0" }, "engines": { "node": ">=10" } }, "node_modules/mimic-response": { "version": "3.1.0", "resolved": 
"https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/minimist": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/mkdirp-classic": { "version": "0.5.3", "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==" }, "node_modules/nanoid": { "version": "3.3.7", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], "bin": { "nanoid": "bin/nanoid.cjs" }, "engines": { "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, "node_modules/napi-build-utils": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==" }, "node_modules/node-abi": { "version": "3.52.0", "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.52.0.tgz", "integrity": "sha512-JJ98b02z16ILv7859irtXn4oUaFWADtvkzy2c0IAatNVX2Mc9Yoh8z6hZInn3QwvMEYhHuQloYi+TTQy67SIdQ==", "dependencies": { "semver": "^7.3.5" }, "engines": { "node": ">=10" } }, "node_modules/node-addon-api": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-6.1.0.tgz", "integrity": "sha512-+eawOlIgy680F0kBzPUNFhMZGtJ1YmqM6l4+Crf4IkImjYrO/mqPwRMh352g23uIaQKFItcQ64I7KMaJxHgAVA==" }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dependencies": { "wrappy": "1" } }, "node_modules/onnx-proto": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/onnx-proto/-/onnx-proto-4.0.4.tgz", "integrity": "sha512-aldMOB3HRoo6q/phyB6QRQxSt895HNNw82BNyZ2CMh4bjeKv7g/c+VpAFtJuEMVfYLMbRx61hbuqnKceLeDcDA==", "dependencies": { "protobufjs": "^6.8.8" } }, "node_modules/onnxruntime-common": { "version": "1.14.0", "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.14.0.tgz", "integrity": "sha512-3LJpegM2iMNRX2wUmtYfeX/ytfOzNwAWKSq1HbRrKc9+uqG/FsEA0bbKZl1btQeZaXhC26l44NWpNUeXPII7Ew==" }, "node_modules/onnxruntime-node": { "version": "1.14.0", "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.14.0.tgz", "integrity": "sha512-5ba7TWomIV/9b6NH/1x/8QEeowsb+jBEvFzU6z0T4mNsFwdPqXeFUM7uxC6QeSRkEbWu3qEB0VMjrvzN/0S9+w==", "optional": true, "os": [ "win32", "darwin", "linux" ], "dependencies": { "onnxruntime-common": "~1.14.0" } }, "node_modules/onnxruntime-web": { "version": "1.14.0", "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.14.0.tgz", "integrity": "sha512-Kcqf43UMfW8mCydVGcX9OMXI2VN17c0p6XvR7IPSZzBf/6lteBzXHvcEVWDPmCKuGombl997HgLqj91F11DzXw==", "dependencies": { "flatbuffers": "^1.12.0", "guid-typescript": 
"^1.0.9", "long": "^4.0.0", "onnx-proto": "^4.0.4", "onnxruntime-common": "~1.14.0", "platform": "^1.3.6" } }, "node_modules/onnxruntime-web/node_modules/flatbuffers": { "version": "1.12.0", "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz", "integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ==" }, "node_modules/pad-left": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/pad-left/-/pad-left-2.1.0.tgz", "integrity": "sha512-HJxs9K9AztdIQIAIa/OIazRAUW/L6B9hbQDxO4X07roW3eo9XqZc2ur9bn1StH9CnbbI9EgvejHQX7CBpCF1QA==", "peer": true, "dependencies": { "repeat-string": "^1.5.4" }, "engines": { "node": ">=0.10.0" } }, "node_modules/picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", "dev": true }, "node_modules/platform": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz", "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==" }, "node_modules/postcss": { "version": "8.4.32", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.32.tgz", "integrity": "sha512-D/kj5JNu6oo2EIy+XL/26JEDTlIbB8hw85G8StOE6L74RQAVVP5rej6wxCNqyMbR4RkPfqvezVbPw81Ngd6Kcw==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/postcss/" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" }, { "type": "github", "url": "https://github.com/sponsors/ai" } ], "dependencies": { "nanoid": "^3.3.7", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, "engines": { "node": "^10 || ^12 || >=14" } }, "node_modules/prebuild-install": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", "napi-build-utils": "^1.0.1", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", "simple-get": "^4.0.0", "tar-fs": "^2.0.0", "tunnel-agent": "^0.6.0" }, "bin": { "prebuild-install": "bin.js" }, "engines": { "node": ">=10" } }, "node_modules/prebuild-install/node_modules/tar-fs": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", "dependencies": { "chownr": "^1.1.1", "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", "tar-stream": "^2.1.4" } }, "node_modules/prebuild-install/node_modules/tar-stream": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", "dependencies": { "bl": "^4.0.3", "end-of-stream": "^1.4.1", "fs-constants": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^3.1.1" }, "engines": { "node": ">=6" } }, "node_modules/protobufjs": { "version": "6.11.4", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz", "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==", "hasInstallScript": true, "dependencies": { 
"@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", "@protobufjs/codegen": "^2.0.4", "@protobufjs/eventemitter": "^1.1.0", "@protobufjs/fetch": "^1.1.0", "@protobufjs/float": "^1.0.2", "@protobufjs/inquire": "^1.1.0", "@protobufjs/path": "^1.1.2", "@protobufjs/pool": "^1.1.0", "@protobufjs/utf8": "^1.1.0", "@types/long": "^4.0.1", "@types/node": ">=13.7.0", "long": "^4.0.0" }, "bin": { "pbjs": "bin/pbjs", "pbts": "bin/pbts" } }, "node_modules/pump": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "node_modules/queue-tick": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.1.tgz", "integrity": "sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag==" }, "node_modules/quickselect": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/quickselect/-/quickselect-1.1.1.tgz", "integrity": "sha512-qN0Gqdw4c4KGPsBOQafj6yj/PA6c/L63f6CaZ/DCF/xF4Esu3jVmKLUDYxghFx8Kb/O7y9tI7x2RjTSXwdK1iQ==" }, "node_modules/rbush-3d": { "version": "0.0.4", "resolved": "https://registry.npmjs.org/rbush-3d/-/rbush-3d-0.0.4.tgz", "integrity": "sha512-s02wJ4Oawn3xdfIUN1hO+dhIcI3zL4vFs+yTlFn/U1jpHh3zaIcaOhLVL6SxyhB4vR/wad77HVJ9dG/ZyLZ6tQ==", "dependencies": { "@types/node": "^10.5.1", "@types/tape": "^4.2.32", "quickselect": "^1.0.0" } }, "node_modules/rbush-3d/node_modules/@types/node": { "version": "10.17.60", "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" }, "node_modules/rc": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" }, "bin": { "rc": "cli.js" } }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/regl": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/regl/-/regl-2.1.0.tgz", "integrity": "sha512-oWUce/aVoEvW5l2V0LK7O5KJMzUSKeiOwFuJehzpSFd43dO5spP9r+sSUfhKtsky4u6MCqWJaRL+abzExynfTg==" }, "node_modules/repeat-string": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", "peer": true, "engines": { "node": ">=0.10" } }, "node_modules/rollup": { "version": "4.6.1", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.6.1.tgz", "integrity": "sha512-jZHaZotEHQaHLgKr8JnQiDT1rmatjgKlMekyksz+yk9jt/8z9quNjnKNRoaM0wd9DC2QKXjmWWuDYtM3jfF8pQ==", "dev": true, "bin": { "rollup": "dist/bin/rollup" }, "engines": { "node": ">=18.0.0", "npm": ">=8.0.0" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.6.1", "@rollup/rollup-android-arm64": "4.6.1", "@rollup/rollup-darwin-arm64": "4.6.1", 
"@rollup/rollup-darwin-x64": "4.6.1", "@rollup/rollup-linux-arm-gnueabihf": "4.6.1", "@rollup/rollup-linux-arm64-gnu": "4.6.1", "@rollup/rollup-linux-arm64-musl": "4.6.1", "@rollup/rollup-linux-x64-gnu": "4.6.1", "@rollup/rollup-linux-x64-musl": "4.6.1", "@rollup/rollup-win32-arm64-msvc": "4.6.1", "@rollup/rollup-win32-ia32-msvc": "4.6.1", "@rollup/rollup-win32-x64-msvc": "4.6.1", "fsevents": "~2.3.2" } }, "node_modules/rw": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==" }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/safer-buffer": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/semver": { "version": "7.5.4", "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dependencies": { "lru-cache": "^6.0.0" }, "bin": { "semver": "bin/semver.js" }, "engines": { "node": ">=10" } }, "node_modules/sharp": { "version": "0.32.6", "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.32.6.tgz", "integrity": "sha512-KyLTWwgcR9Oe4d9HwCwNM2l7+J0dUQwn/yf7S0EnTtb0eVS4RxO0eUSvxPtzT4F3SY+C4K6fqdv/DO27sJ/v/w==", "hasInstallScript": true, "dependencies": { "color": "^4.2.3", "detect-libc": "^2.0.2", "node-addon-api": "^6.1.0", "prebuild-install": "^7.1.1", "semver": "^7.5.4", "simple-get": "^4.0.1", "tar-fs": "^3.0.4", "tunnel-agent": "^0.6.0" }, "engines": { "node": ">=14.15.0" }, "funding": { "url": "https://opencollective.com/libvips" } }, "node_modules/simple-concat": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ] }, "node_modules/simple-get": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "dependencies": { "decompress-response": "^6.0.0", "once": "^1.3.1", "simple-concat": "^1.0.0" } }, "node_modules/simple-swizzle": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", "dependencies": { "is-arrayish": "^0.3.1" } }, "node_modules/source-map-js": { 
"version": "1.0.2", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/stream-read-all": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/stream-read-all/-/stream-read-all-3.0.1.tgz", "integrity": "sha512-EWZT9XOceBPlVJRrYcykW8jyRSZYbkb/0ZK36uLEmoWVO5gxBOnntNTseNzfREsqxqdfEGQrD8SXQ3QWbBmq8A==", "peer": true, "engines": { "node": ">=10" } }, "node_modules/streamx": { "version": "2.15.5", "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.15.5.tgz", "integrity": "sha512-9thPGMkKC2GctCzyCUjME3yR03x2xNo0GPKGkRw2UMYN+gqWa9uqpyNWhmsNCutU5zHmkUum0LsCRQTXUgUCAg==", "dependencies": { "fast-fifo": "^1.1.0", "queue-tick": "^1.0.1" } }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "dependencies": { "safe-buffer": "~5.2.0" } }, "node_modules/strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", "engines": { "node": ">=0.10.0" } }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "peer": true, "dependencies": { "has-flag": "^4.0.0" }, "engines": { "node": ">=8" } }, "node_modules/table-layout": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/table-layout/-/table-layout-3.0.2.tgz", "integrity": "sha512-rpyNZYRw+/C+dYkcQ3Pr+rLxW4CfHpXjPDnG7lYhdRoUcZTUt+KEsX+94RGp/aVp/MQU35JCITv2T/beY4m+hw==", "peer": true, "dependencies": { "@75lb/deep-merge": "^1.1.1", "array-back": "^6.2.2", "command-line-args": "^5.2.1", "command-line-usage": "^7.0.0", "stream-read-all": "^3.0.1", "typical": "^7.1.1", "wordwrapjs": "^5.1.0" }, "bin": { "table-layout": "bin/cli.js" }, "engines": { "node": ">=12.17" } }, "node_modules/table-layout/node_modules/array-back": { "version": "6.2.2", "resolved": "https://registry.npmjs.org/array-back/-/array-back-6.2.2.tgz", "integrity": "sha512-gUAZ7HPyb4SJczXAMUXMGAvI976JoK3qEx9v1FTmeYuJj0IBiaKttG1ydtGKdkfqWkIkouke7nG8ufGy77+Cvw==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/table-layout/node_modules/typical": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/typical/-/typical-7.1.1.tgz", "integrity": "sha512-T+tKVNs6Wu7IWiAce5BgMd7OZfNYUndHwc5MknN+UHOudi7sGZzuHdCadllRuqJ3fPtgFtIH9+lt9qRv6lmpfA==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/tar-fs": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.4.tgz", "integrity": "sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==", "dependencies": { "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", "tar-stream": "^3.1.5" } }, "node_modules/tar-stream": { "version": "3.1.6", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.6.tgz", "integrity": "sha512-B/UyjYwPpMBv+PaFSWAmtYjwdrlEaZQEhMIBFNC5oEG8lpiW8XjcSdmEaClj28ArfKScKHs2nshz3k2le6crsg==", "dependencies": { "b4a": "^1.6.4", 
"fast-fifo": "^1.2.0", "streamx": "^2.15.0" } }, "node_modules/tslib": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", "peer": true }, "node_modules/tunnel-agent": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", "dependencies": { "safe-buffer": "^5.0.1" }, "engines": { "node": "*" } }, "node_modules/typical": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/typical/-/typical-4.0.0.tgz", "integrity": "sha512-VAH4IvQ7BDFYglMd7BPRDfLgxZZX4O4TFcRDA6EN5X7erNJJq+McIEp8np9aVtxrCJ6qx4GTYVfOWNjcqwZgRw==", "peer": true, "engines": { "node": ">=8" } }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "node_modules/vite": { "version": "5.0.13", "resolved": "https://registry.npmjs.org/vite/-/vite-5.0.13.tgz", "integrity": "sha512-/9ovhv2M2dGTuA+dY93B9trfyWMDRQw2jdVBhHNP6wr0oF34wG2i/N55801iZIpgUpnHDm4F/FabGQLyc+eOgg==", "dev": true, "dependencies": { "esbuild": "^0.19.3", "postcss": "^8.4.32", "rollup": "^4.2.0" }, "bin": { "vite": "bin/vite.js" }, "engines": { "node": "^18.0.0 || >=20.0.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || >=20.0.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "stylus": "*", "sugarss": "*", "terser": "^5.4.0" }, "peerDependenciesMeta": { "@types/node": { "optional": true }, "less": { "optional": true }, "lightningcss": { "optional": true }, "sass": { "optional": true }, "stylus": { "optional": true }, "sugarss": { "optional": true }, "terser": { "optional": true } } }, "node_modules/wordwrapjs": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-5.1.0.tgz", "integrity": "sha512-JNjcULU2e4KJwUNv6CHgI46UvDGitb6dGryHajXTDiLgg1/RiGoPSDw4kZfYnwGtEXf2ZMeIewDQgFGzkCB2Sg==", "peer": true, "engines": { "node": ">=12.17" } }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" } } }
transformers.js/examples/semantic-audio-search/package-lock.json/0
{ "file_path": "transformers.js/examples/semantic-audio-search/package-lock.json", "repo_id": "transformers.js", "token_count": 46220 }
308
'use client' import Image from 'next/image' import { downloadImage } from '../utils.js' export function Modal({ currentImage, setCurrentImage }) { const photo_url = currentImage ? `https://unsplash.com/photos/${currentImage.id}` : null; const photo_image_url = currentImage ? `https://images.unsplash.com/${currentImage.url}?auto=format&fit=crop&w=480&q=80` : null; return ( <div className='fixed inset-0 z-30 backdrop-blur-2xl w-full h-full bg-black top-0 left-0 transition' style={{ backgroundColor: `rgba(0, 0, 0, ${currentImage ? 0.8 : 0})`, opacity: currentImage ? 1 : 0, pointerEvents: currentImage ? 'auto' : 'none', }} > {currentImage && <> <Image alt='' className="transform rounded-lg transition will-change-auto" style={ { transform: 'translate3d(0, 0, 0)', } } layout={'fill'} objectFit={'contain'} src={photo_image_url} unoptimized={true} /> <div className='absolute top-0 left-0 flex items-center gap-2 p-3 text-white' > <button onClick={() => setCurrentImage(null)} className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white"> <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5"> <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12"></path> </svg> </button> </div> <div className="absolute top-0 right-0 flex items-center gap-2 p-3 text-white"> <a href={photo_url} className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white" target="_blank" title="View on Unsplash" rel="noreferrer"> <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5"> <path strokeLinecap="round" strokeLinejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25"></path> </svg> </a> <button onClick={() => downloadImage(photo_image_url, `${currentImage.id}.png`)} className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white" title="Download"> <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5"> <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3"> </path> </svg> </button> </div> </> } </div>) }
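The `downloadImage` helper imported from `../utils.js` is not included in this record. As a rough, hypothetical sketch of what such a helper could look like (an assumption for illustration, not the actual implementation, and it presumes the image host permits cross-origin fetches):

```js
// Hypothetical sketch of ../utils.js's downloadImage helper (not the actual source).
export async function downloadImage(url, filename) {
  // Fetch the image and hold it as a local Blob (requires CORS to be allowed by the host).
  const blob = await (await fetch(url)).blob();
  const objectUrl = URL.createObjectURL(blob);

  // Trigger a client-side download through a temporary anchor element.
  const a = document.createElement('a');
  a.href = objectUrl;
  a.download = filename;
  document.body.appendChild(a);
  a.click();
  a.remove();
  URL.revokeObjectURL(objectUrl);
}
```

In `Modal.jsx` above it is called with the photo URL and an id-based file name, so the second argument would be used verbatim as the suggested download name.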
transformers.js/examples/semantic-image-search-client/src/app/components/Modal.jsx/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/src/app/components/Modal.jsx", "repo_id": "transformers.js", "token_count": 2014 }
309
# Vanilla JS Application

This folder contains the source code for a simple web application that detects objects in images using Transformers.js! Check out the demo [here](https://huggingface.co/spaces/Scrimba/vanilla-js-object-detector).

If you'd like to build it yourself, you can follow the [written](https://huggingface.co/docs/transformers.js/tutorials/vanilla-js) or [interactive video](https://scrimba.com/scrim/cKm9bDAg) tutorials we have made!

Here's how it works: the user clicks “Upload image” and selects an image using an input dialog. The app then analyses the image with an object detection model and overlays the predicted bounding boxes on top of it, like this:

![Demo](https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/js-detection-interence-zebra.png)
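To make that flow concrete, here is a minimal sketch of what the detection step can look like with Transformers.js. The CDN import path, the `Xenova/detr-resnet-50` checkpoint, and the `threshold`/`percentage` options are illustrative assumptions rather than a copy of this example's source; the linked tutorial walks through the real implementation, including drawing the boxes on an overlay.

```js
// Minimal sketch of the detection step (assumptions noted above; see the tutorial for the real code).
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// Create an object detection pipeline; the model is downloaded and cached on first use.
const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');

// Run it on the uploaded image (here, the data URL of an <img> element on the page).
const img = document.querySelector('img');
const output = await detector(img.src, { threshold: 0.5, percentage: true });

// Each prediction carries a label, a confidence score, and a bounding box in percent coordinates.
for (const { label, score, box } of output) {
  console.log(`${label} (${(100 * score).toFixed(1)}%)`, box); // box = { xmin, ymin, xmax, ymax }
}
```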
transformers.js/examples/vanilla-js/README.md/0
{ "file_path": "transformers.js/examples/vanilla-js/README.md", "repo_id": "transformers.js", "token_count": 237 }
310
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Transformers.js | real-time CLIP</title>
</head>

<body>
  <h1>
    Real-time zero-shot image classification (WebGPU)
  </h1>
  <h3>
    Runs locally in your browser w/
    <a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
  </h3>
  <div id="container">
    <video id="video" autoplay muted playsinline></video>
    <div id="overlay"></div>
  </div>
  <div id="controls">
    <div title="Labels used to perform zero-shot image classification">
      <label>Labels (comma-separated)</label>
      <br>
      <input id="labels" type="text" disabled>
    </div>
    <div title="Template used to perform zero-shot image classification">
      <label>Hypothesis template</label>
      <br>
      <input id="template" type="text" value="A photo of a {}" disabled>
    </div>
  </div>
  <label id="status"></label>

  <script type="module" src="/main.js"></script>
</body>

</html>
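The actual application logic lives in `/main.js`, which is not part of this record. Below is a hedged sketch of what it might do with the elements above; the package import, the `Xenova/clip-vit-base-patch16` checkpoint, and the `device: 'webgpu'` option are assumptions for illustration, and the latter requires a Transformers.js version with WebGPU support.

```js
// Hypothetical outline of /main.js (not the demo's actual source).
import { pipeline } from '@huggingface/transformers';

const video = document.getElementById('video');
const labelsInput = document.getElementById('labels');
const templateInput = document.getElementById('template');
const status = document.getElementById('status');

// Load a zero-shot image classification pipeline (WebGPU execution is an assumption here).
const classifier = await pipeline('zero-shot-image-classification', 'Xenova/clip-vit-base-patch16', { device: 'webgpu' });

// Allow the user to edit the labels/template once the model is ready.
labelsInput.disabled = false;
templateInput.disabled = false;

// Start the webcam and mirror it into the <video> element.
video.srcObject = await navigator.mediaDevices.getUserMedia({ video: true });

const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');

async function classifyFrame() {
  const labels = labelsInput.value.split(',').map((x) => x.trim()).filter(Boolean);
  if (labels.length && video.videoWidth) {
    // Grab the current frame and score it against the user-provided labels.
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    ctx.drawImage(video, 0, 0);
    const results = await classifier(canvas.toDataURL(), labels, {
      hypothesis_template: templateInput.value,
    });
    status.textContent = results.map(({ label, score }) => `${label}: ${score.toFixed(2)}`).join(' | ');
  }
  requestAnimationFrame(classifyFrame);
}
classifyFrame();
```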
transformers.js/examples/webgpu-clip/index.html/0
{ "file_path": "transformers.js/examples/webgpu-clip/index.html", "repo_id": "transformers.js", "token_count": 417 }
311
# TODO: Enable once https://github.com/huggingface/optimum/pull/1552 is merged
#
# Support exporting audio and text models separately:
#
# Adapted from https://github.com/huggingface/optimum/issues/1186#issuecomment-1637641760

# from optimum.exporters.onnx.model_configs import CLAPTextWithProjectionOnnxConfig, AudioOnnxConfig
# from optimum.utils.normalized_config import NormalizedAudioConfig
# from optimum.utils.input_generators import DummyAudioInputGenerator
# from typing import Dict

# class ClapAudioModelWithProjectionOnnxConfig(AudioOnnxConfig):
#     NORMALIZED_CONFIG_CLASS = NormalizedAudioConfig
#     DUMMY_INPUT_GENERATOR_CLASSES = (DummyAudioInputGenerator, )

#     @property
#     def inputs(self) -> Dict[str, Dict[int, str]]:
#         return {
#             "input_features": {0: "audio_batch_size", 1: "num_channels", 2: "height", 3: "width"},  # As described in modeling_clap.py
#         }

#     @property
#     def outputs(self) -> Dict[str, Dict[int, str]]:
#         return {
#             "audio_embeds": {0: "batch_size"},
#         }

# class ClapTextModelWithProjectionOnnxConfig(CLAPTextWithProjectionOnnxConfig):
#     @property
#     def outputs(self) -> Dict[str, Dict[int, str]]:
#         return {
#             "text_embeds": {0: "batch_size"},
#         }

#     def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
#         dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)

#         if framework == "pt":
#             import torch
#             dummy_inputs["input_ids"] = dummy_inputs["input_ids"].to(dtype=torch.int64)
#         return dummy_inputs
transformers.js/scripts/extra/clap.py/0
{ "file_path": "transformers.js/scripts/extra/clap.py", "repo_id": "transformers.js", "token_count": 684 }
312
import { Callable } from "../utils/generic.js"; import { Tensor, interpolate, stack } from "../utils/tensor.js"; import { bankers_round, max, min, softmax } from "../utils/maths.js"; import { RawImage } from "../utils/image.js"; import { calculateReflectOffset } from "../utils/core.js"; import { getModelJSON } from "../utils/hub.js"; import { IMAGE_PROCESSOR_NAME } from '../utils/constants.js'; /** * Named tuple to indicate the order we are using is (height x width), * even though the Graphics' industry standard is (width x height). * @typedef {[height: number, width: number]} HeightWidth */ /** * @typedef {object} ImageProcessorResult * @property {Tensor} pixel_values The pixel values of the batched preprocessed images. * @property {HeightWidth[]} original_sizes Array of two-dimensional tuples like [[480, 640]]. * @property {HeightWidth[]} reshaped_input_sizes Array of two-dimensional tuples like [[1000, 1330]]. */ /** * Helper function to constrain a value to be a multiple of a number. * @param {number} val The value to constrain. * @param {number} multiple The number to constrain to. * @param {number} [minVal=0] The minimum value to constrain to. * @param {number} [maxVal=null] The maximum value to constrain to. * @returns {number} The constrained value. * @private */ function constraint_to_multiple_of(val, multiple, minVal = 0, maxVal = null) { const a = val / multiple; let x = bankers_round(a) * multiple; if (maxVal !== null && x > maxVal) { x = Math.floor(a) * multiple; } if (x < minVal) { x = Math.ceil(a) * multiple; } return x; } /** * Rounds the height and width down to the closest multiple of size_divisibility * @param {[number, number]} size The size of the image * @param {number} divisor The divisor to use. * @returns {[number, number]} The rounded size. */ function enforce_size_divisibility([width, height], divisor) { return [ Math.max(Math.floor(width / divisor), 1) * divisor, Math.max(Math.floor(height / divisor), 1) * divisor ]; } // Helper functions /** * Converts bounding boxes from center format to corners format. * * @param {number[]} arr The coordinate for the center of the box and its width, height dimensions (center_x, center_y, width, height) * @returns {number[]} The coodinates for the top-left and bottom-right corners of the box (top_left_x, top_left_y, bottom_right_x, bottom_right_y) */ export function center_to_corners_format([centerX, centerY, width, height]) { return [ centerX - width / 2, centerY - height / 2, centerX + width / 2, centerY + height / 2 ]; } /** * Post-processes the outputs of the model (for object detection). * @param {Object} outputs The outputs of the model that must be post-processed * @param {Tensor} outputs.logits The logits * @param {Tensor} outputs.pred_boxes The predicted boxes. * @param {number} [threshold=0.5] The threshold to use for the scores. * @param {[number, number][]} [target_sizes=null] The sizes of the original images. * @param {boolean} [is_zero_shot=false] Whether zero-shot object detection was performed. * @return {Object[]} An array of objects containing the post-processed outputs. 
*/ export function post_process_object_detection(outputs, threshold = 0.5, target_sizes = null, is_zero_shot = false) { const out_logits = outputs.logits; const out_bbox = outputs.pred_boxes; const [batch_size, num_boxes, num_classes] = out_logits.dims; if (target_sizes !== null && target_sizes.length !== batch_size) { throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") } let toReturn = []; for (let i = 0; i < batch_size; ++i) { let target_size = target_sizes !== null ? target_sizes[i] : null; let info = { boxes: [], classes: [], scores: [] } let logits = out_logits[i]; let bbox = out_bbox[i]; for (let j = 0; j < num_boxes; ++j) { let logit = logits[j]; let indices = []; let probs; if (is_zero_shot) { // Get indices of classes with high enough probability probs = logit.sigmoid().data; for (let k = 0; k < probs.length; ++k) { if (probs[k] > threshold) { indices.push(k); } } } else { // Get most probable class let maxIndex = max(logit.data)[1]; if (maxIndex === num_classes - 1) { // This is the background class, skip it continue; } // Compute softmax over classes probs = softmax(logit.data); if (probs[maxIndex] < threshold) { continue; } indices.push(maxIndex); } for (const index of indices) { // Some class has a high enough probability /** @type {number[]} */ let box = bbox[j].data; // convert to [x0, y0, x1, y1] format box = center_to_corners_format(box) if (target_size !== null) { box = box.map((x, i) => x * target_size[(i + 1) % 2]) } info.boxes.push(box); info.classes.push(index); info.scores.push(probs[index]); } } toReturn.push(info); } return toReturn; } /** * Post-processes the outputs of the model (for semantic segmentation). * @param {*} outputs Raw outputs of the model. * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size * (height, width) of each prediction. If unset, predictions will not be resized. * @returns {{segmentation: Tensor; labels: number[]}[]} The semantic segmentation maps. */ export function post_process_semantic_segmentation(outputs, target_sizes = null) { const logits = outputs.logits; const batch_size = logits.dims[0]; if (target_sizes !== null && target_sizes.length !== batch_size) { throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") } const toReturn = []; for (let i = 0; i < batch_size; ++i) { const target_size = target_sizes !== null ? target_sizes[i] : null; let data = logits[i]; // 1. If target_size is not null, we need to resize the masks to the target size if (target_size !== null) { // resize the masks to the target size data = interpolate(data, target_size, 'bilinear', false); } const [height, width] = target_size ?? 
data.dims.slice(-2); const segmentation = new Tensor( 'int32', new Int32Array(height * width), [height, width] ); // Buffer to store current largest value const buffer = data[0].data; const segmentation_data = segmentation.data; for (let j = 1; j < data.dims[0]; ++j) { const row = data[j].data; for (let k = 0; k < row.length; ++k) { if (row[k] > buffer[k]) { buffer[k] = row[k]; segmentation_data[k] = j; } } } // Store which objects have labels // This is much more efficient that creating a set of the final values const hasLabel = new Array(data.dims[0]); for (let j = 0; j < segmentation_data.length; ++j) { const index = segmentation_data[j]; hasLabel[index] = index; } /** @type {number[]} The unique list of labels that were detected */ const labels = hasLabel.filter(x => x !== undefined); toReturn.push({ segmentation, labels }); } return toReturn; } /** * Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. * @param {Tensor} class_logits The class logits. * @param {Tensor} mask_logits The mask logits. * @param {number} object_mask_threshold A number between 0 and 1 used to binarize the masks. * @param {number} num_labels The number of labels. * @returns {[Tensor[], number[], number[]]} The binarized masks, the scores, and the labels. * @private */ function remove_low_and_no_objects(class_logits, mask_logits, object_mask_threshold, num_labels) { const mask_probs_item = []; const pred_scores_item = []; const pred_labels_item = []; for (let j = 0; j < class_logits.dims[0]; ++j) { const cls = class_logits[j]; const mask = mask_logits[j]; const pred_label = max(cls.data)[1]; if (pred_label === num_labels) { // Is the background, so we ignore it continue; } const scores = softmax(cls.data); const pred_score = scores[pred_label]; if (pred_score > object_mask_threshold) { mask_probs_item.push(mask); pred_scores_item.push(pred_score); pred_labels_item.push(pred_label); } } return [mask_probs_item, pred_scores_item, pred_labels_item]; } /** * Checks whether the segment is valid or not. * @param {Int32Array} mask_labels Labels for each pixel in the mask. * @param {Tensor[]} mask_probs Probabilities for each pixel in the masks. * @param {number} k The class id of the segment. * @param {number} mask_threshold The mask threshold. * @param {number} overlap_mask_area_threshold The overlap mask area threshold. * @returns {[boolean, number[]]} Whether the segment is valid or not, and the indices of the valid labels. * @private */ function check_segment_validity( mask_labels, mask_probs, k, mask_threshold = 0.5, overlap_mask_area_threshold = 0.8 ) { // mask_k is a 1D array of indices, indicating where the mask is equal to k const mask_k = []; let mask_k_area = 0; let original_area = 0; const mask_probs_k_data = mask_probs[k].data; // Compute the area of all the stuff in query k for (let i = 0; i < mask_labels.length; ++i) { if (mask_labels[i] === k) { mask_k.push(i); ++mask_k_area; } if (mask_probs_k_data[i] >= mask_threshold) { ++original_area; } } let mask_exists = mask_k_area > 0 && original_area > 0; // Eliminate disconnected tiny segments if (mask_exists) { // Perform additional check let area_ratio = mask_k_area / original_area; mask_exists = area_ratio > overlap_mask_area_threshold; } return [mask_exists, mask_k] } /** * Computes the segments. * @param {Tensor[]} mask_probs The mask probabilities. * @param {number[]} pred_scores The predicted scores. * @param {number[]} pred_labels The predicted labels. 
* @param {number} mask_threshold The mask threshold. * @param {number} overlap_mask_area_threshold The overlap mask area threshold. * @param {Set<number>} label_ids_to_fuse The label ids to fuse. * @param {number[]} target_size The target size of the image. * @returns {[Tensor, Array<{id: number, label_id: number, score: number}>]} The computed segments. * @private */ function compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold, overlap_mask_area_threshold, label_ids_to_fuse = null, target_size = null, ) { const [height, width] = target_size ?? mask_probs[0].dims; const segmentation = new Tensor( 'int32', new Int32Array(height * width), [height, width] ); const segments = []; // 1. If target_size is not null, we need to resize the masks to the target size if (target_size !== null) { // resize the masks to the target size for (let i = 0; i < mask_probs.length; ++i) { mask_probs[i] = interpolate(mask_probs[i], target_size, 'bilinear', false); } } // 2. Weigh each mask by its prediction score // NOTE: `mask_probs` is updated in-place // // Temporary storage for the best label/scores for each pixel ([height, width]): const mask_labels = new Int32Array(mask_probs[0].data.length); const bestScores = new Float32Array(mask_probs[0].data.length); for (let i = 0; i < mask_probs.length; ++i) { let score = pred_scores[i]; const mask_probs_i_data = mask_probs[i].data; for (let j = 0; j < mask_probs_i_data.length; ++j) { mask_probs_i_data[j] *= score if (mask_probs_i_data[j] > bestScores[j]) { mask_labels[j] = i; bestScores[j] = mask_probs_i_data[j]; } } } let current_segment_id = 0; // let stuff_memory_list = {} const segmentation_data = segmentation.data; for (let k = 0; k < pred_labels.length; ++k) { const pred_class = pred_labels[k]; // TODO add `should_fuse` // let should_fuse = pred_class in label_ids_to_fuse // Check if mask exists and large enough to be a segment const [mask_exists, mask_k] = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if (!mask_exists) { // Nothing to see here continue; } // TODO // if (pred_class in stuff_memory_list) { // current_segment_id = stuff_memory_list[pred_class] // } else { // current_segment_id += 1; // } ++current_segment_id; // Add current object segment to final segmentation map for (const index of mask_k) { segmentation_data[index] = current_segment_id; } segments.push({ id: current_segment_id, label_id: pred_class, // was_fused: should_fuse, TODO score: pred_scores[k], }) // TODO // if(should_fuse){ // stuff_memory_list[pred_class] = current_segment_id // } } return [segmentation, segments]; } /** * Rescales the image so that the following conditions are met: * * 1. Both dimensions (height and width) are divisible by 'factor'. * 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. * 3. The aspect ratio of the image is maintained as closely as possible. * * @param {number} height The height of the image. * @param {number} width The width of the image. * @param {number} [factor=28] The factor to use for resizing. * @param {number} [min_pixels=56*56] The minimum number of pixels. * @param {number} [max_pixels=14*14*4*1280] The maximum number of pixels. * @returns {[number, number]} The new height and width of the image. * @throws {Error} If the height or width is smaller than the factor. 
*/ function smart_resize(height, width, factor = 28, min_pixels = 56 * 56, max_pixels = 14 * 14 * 4 * 1280) { if (height < factor || width < factor) { throw new Error(`height:${height} or width:${width} must be larger than factor:${factor}`); } else if (Math.max(height, width) / Math.min(height, width) > 200) { throw new Error( `absolute aspect ratio must be smaller than 200, got ${Math.max(height, width) / Math.min(height, width)}` ); } let h_bar = Math.round(height / factor) * factor; let w_bar = Math.round(width / factor) * factor; if (h_bar * w_bar > max_pixels) { const beta = Math.sqrt((height * width) / max_pixels); h_bar = Math.floor((height / beta) / factor) * factor; w_bar = Math.floor((width / beta) / factor) * factor; } else if (h_bar * w_bar < min_pixels) { const beta = Math.sqrt(min_pixels / (height * width)); h_bar = Math.ceil((height * beta) / factor) * factor; w_bar = Math.ceil((width * beta) / factor) * factor; } return [h_bar, w_bar]; } /** * Post-process the model output to generate the final panoptic segmentation. * @param {*} outputs The model output to post process * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks. * @param {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values. * @param {number} [overlap_mask_area_threshold=0.8] The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. * @param {Set<number>} [label_ids_to_fuse=null] The labels in this state will have all their instances be fused together. * @param {[number, number][]} [target_sizes=null] The target sizes to resize the masks to. * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>} */ export function post_process_panoptic_segmentation( outputs, threshold = 0.5, mask_threshold = 0.5, overlap_mask_area_threshold = 0.8, label_ids_to_fuse = null, target_sizes = null, ) { if (label_ids_to_fuse === null) { console.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = new Set(); } const class_queries_logits = outputs.class_queries_logits ?? outputs.logits; // [batch_size, num_queries, num_classes+1] const masks_queries_logits = outputs.masks_queries_logits ?? outputs.pred_masks; // [batch_size, num_queries, height, width] const mask_probs = masks_queries_logits.sigmoid() // [batch_size, num_queries, height, width] let [batch_size, num_queries, num_labels] = class_queries_logits.dims; num_labels -= 1; // Remove last class (background) if (target_sizes !== null && target_sizes.length !== batch_size) { throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") } let toReturn = []; for (let i = 0; i < batch_size; ++i) { let target_size = target_sizes !== null ? target_sizes[i] : null; let class_logits = class_queries_logits[i]; let mask_logits = mask_probs[i]; let [mask_probs_item, pred_scores_item, pred_labels_item] = remove_low_and_no_objects(class_logits, mask_logits, threshold, num_labels); if (pred_labels_item.length === 0) { // No mask found let [height, width] = target_size ?? 
mask_logits.dims.slice(-2); let segmentation = new Tensor( 'int32', new Int32Array(height * width).fill(-1), [height, width] ) toReturn.push({ segmentation: segmentation, segments_info: [] }); continue; } // Get segmentation map and segment information of batch item let [segmentation, segments] = compute_segments( mask_probs_item, pred_scores_item, pred_labels_item, mask_threshold, overlap_mask_area_threshold, label_ids_to_fuse, target_size, ) toReturn.push({ segmentation: segmentation, segments_info: segments }) } return toReturn; } /** * Post-processes the outputs of the model (for instance segmentation). * @param {*} outputs Raw outputs of the model. * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks. * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size * (height, width) of each prediction. If unset, predictions will not be resized. * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>} */ export function post_process_instance_segmentation(outputs, threshold = 0.5, target_sizes = null) { throw new Error('`post_process_instance_segmentation` is not yet implemented.'); } /** * @typedef {Object} ImageProcessorConfig A configuration object used to create an image processor. * @property {function} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates. * @property {number[]} [image_mean] The mean values for image normalization. * @property {number[]} [image_std] The standard deviation values for image normalization. * @property {boolean} [do_rescale] Whether to rescale the image pixel values to the [0,1] range. * @property {number} [rescale_factor] The factor to use for rescaling the image pixel values. * @property {boolean} [do_normalize] Whether to normalize the image pixel values. * @property {boolean} [do_resize] Whether to resize the image. * @property {number} [resample] What method to use for resampling. * @property {number|Object} [size] The size to resize the image to. * @property {number|Object} [image_size] The size to resize the image to (same as `size`). * @property {boolean} [do_flip_channel_order=false] Whether to flip the color channels from RGB to BGR. * Can be overridden by the `do_flip_channel_order` parameter in the `preprocess` method. * @property {boolean} [do_center_crop] Whether to center crop the image to the specified `crop_size`. * Can be overridden by `do_center_crop` in the `preprocess` method. * @property {boolean} [do_thumbnail] Whether to resize the image using thumbnail method. * @property {boolean} [keep_aspect_ratio] If `true`, the image is resized to the largest possible size such that the aspect ratio is preserved. * Can be overidden by `keep_aspect_ratio` in `preprocess`. * @property {number} [ensure_multiple_of] If `do_resize` is `true`, the image is resized to a size that is a multiple of this value. * Can be overidden by `ensure_multiple_of` in `preprocess`. * * @property {number[]} [mean] The mean values for image normalization (same as `image_mean`). * @property {number[]} [std] The standard deviation values for image normalization (same as `image_std`). */ export class ImageProcessor extends Callable { /** * Constructs a new `ImageProcessor`. * @param {ImageProcessorConfig} config The configuration object. */ constructor(config) { super(); this.image_mean = config.image_mean ?? 
config.mean; this.image_std = config.image_std ?? config.std; this.resample = config.resample ?? 2; // 2 => bilinear this.do_rescale = config.do_rescale ?? true; this.rescale_factor = config.rescale_factor ?? (1 / 255); this.do_normalize = config.do_normalize; this.do_thumbnail = config.do_thumbnail; this.size = config.size ?? config.image_size; this.do_resize = config.do_resize ?? (this.size !== undefined); // @ts-expect-error TS2339 this.size_divisibility = config.size_divisibility ?? config.size_divisor; this.do_center_crop = config.do_center_crop; // @ts-expect-error TS2339 this.crop_size = config.crop_size; // @ts-expect-error TS2339 this.do_convert_rgb = config.do_convert_rgb ?? true; // @ts-expect-error TS2339 this.do_crop_margin = config.do_crop_margin; // @ts-expect-error TS2339 this.pad_size = config.pad_size; // @ts-expect-error TS2339 this.do_pad = config.do_pad; // @ts-expect-error TS2339 this.min_pixels = config.min_pixels; // @ts-expect-error TS2339 this.max_pixels = config.max_pixels; if (this.do_pad && !this.pad_size && this.size && this.size.width !== undefined && this.size.height !== undefined) { // Should pad, but no pad size specified // We infer the pad size from the resize size this.pad_size = this.size } this.do_flip_channel_order = config.do_flip_channel_order ?? false; this.config = config; } /** * Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any * corresponding dimension of the specified size. * @param {RawImage} image The image to be resized. * @param {{height:number, width:number}} size The size `{"height": h, "width": w}` to resize the image to. * @param {string | 0 | 1 | 2 | 3 | 4 | 5} [resample=2] The resampling filter to use. * @returns {Promise<RawImage>} The resized image. */ async thumbnail(image, size, resample = 2) { const input_height = image.height; const input_width = image.width; const output_height = size.height; const output_width = size.width; // We always resize to the smallest of either the input or output size. let height = Math.min(input_height, output_height) let width = Math.min(input_width, output_width) if (height === input_height && width === input_width) { return image; } if (input_height > input_width) { width = Math.floor(input_width * height / input_height); } else if (input_width > input_height) { height = Math.floor(input_height * width / input_width); } return await image.resize(width, height, { resample }); } /** * Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). * @param {RawImage} image The image to be cropped. * @param {number} gray_threshold Value below which pixels are considered to be gray. * @returns {Promise<RawImage>} The cropped image. 
*/ async crop_margin(image, gray_threshold = 200) { const gray_image = image.clone().grayscale(); const minValue = min(gray_image.data)[0]; const maxValue = max(gray_image.data)[0]; const diff = maxValue - minValue; if (diff === 0) { return image; } const threshold = gray_threshold / 255; let x_min = gray_image.width, y_min = gray_image.height, x_max = 0, y_max = 0; const gray_image_data = gray_image.data; for (let j = 0; j < gray_image.height; ++j) { const row = j * gray_image.width; for (let i = 0; i < gray_image.width; ++i) { if ((gray_image_data[row + i] - minValue) / diff < threshold) { // We have a non-zero pixel, so we update the min/max values accordingly x_min = Math.min(x_min, i); y_min = Math.min(y_min, j); x_max = Math.max(x_max, i); y_max = Math.max(y_max, j); } } } image = await image.crop([x_min, y_min, x_max, y_max]); return image; } /** * Pad the image by a certain amount. * @param {Float32Array} pixelData The pixel data to pad. * @param {number[]} imgDims The dimensions of the image (height, width, channels). * @param {{width:number; height:number}|number|'square'} padSize The dimensions of the padded image. * @param {Object} options The options for padding. * @param {'constant'|'symmetric'} [options.mode='constant'] The type of padding to add. * @param {boolean} [options.center=false] Whether to center the image. * @param {number|number[]} [options.constant_values=0] The constant value to use for padding. * @returns {[Float32Array, number[]]} The padded pixel data and image dimensions. */ pad_image(pixelData, imgDims, padSize, { mode = 'constant', center = false, constant_values = 0, } = {}) { const [imageHeight, imageWidth, imageChannels] = imgDims; let paddedImageWidth, paddedImageHeight; if (typeof padSize === 'number') { paddedImageWidth = padSize; paddedImageHeight = padSize; } else if (padSize === 'square') { paddedImageWidth = paddedImageHeight = Math.max(imageHeight, imageWidth); } else { paddedImageWidth = padSize.width; paddedImageHeight = padSize.height; } // Only add padding if there is a difference in size if (paddedImageWidth !== imageWidth || paddedImageHeight !== imageHeight) { const paddedPixelData = new Float32Array(paddedImageWidth * paddedImageHeight * imageChannels); if (Array.isArray(constant_values)) { // Fill with constant values, cycling through the array for (let i = 0; i < paddedPixelData.length; ++i) { paddedPixelData[i] = constant_values[i % imageChannels]; } } else if (constant_values !== 0) { paddedPixelData.fill(constant_values); } const [left, top] = center ? 
[Math.floor((paddedImageWidth - imageWidth) / 2), Math.floor((paddedImageHeight - imageHeight) / 2)] : [0, 0]; // Copy the original image into the padded image for (let i = 0; i < imageHeight; ++i) { const a = (i + top) * paddedImageWidth; const b = i * imageWidth; for (let j = 0; j < imageWidth; ++j) { const c = (a + j + left) * imageChannels; const d = (b + j) * imageChannels; for (let k = 0; k < imageChannels; ++k) { paddedPixelData[c + k] = pixelData[d + k]; } } } if (mode === 'symmetric') { if (center) { throw new Error('`center` padding is not supported when `mode` is set to `symmetric`.'); // TODO: Implement this } const h1 = imageHeight - 1; const w1 = imageWidth - 1; for (let i = 0; i < paddedImageHeight; ++i) { const a = i * paddedImageWidth; const b = calculateReflectOffset(i, h1) * imageWidth; for (let j = 0; j < paddedImageWidth; ++j) { if (i < imageHeight && j < imageWidth) continue; // Do not overwrite original image const c = (a + j) * imageChannels; const d = (b + calculateReflectOffset(j, w1)) * imageChannels; // Copy channel-wise for (let k = 0; k < imageChannels; ++k) { paddedPixelData[c + k] = pixelData[d + k]; } } } } // Update pixel data and image dimensions pixelData = paddedPixelData; imgDims = [paddedImageHeight, paddedImageWidth, imageChannels] } return [pixelData, imgDims]; } /** * Rescale the image' pixel values by `this.rescale_factor`. * @param {Float32Array} pixelData The pixel data to rescale. * @returns {void} */ rescale(pixelData) { for (let i = 0; i < pixelData.length; ++i) { pixelData[i] = this.rescale_factor * pixelData[i]; } } /** * Find the target (width, height) dimension of the output image after * resizing given the input image and the desired size. * @param {RawImage} image The image to resize. * @param {any} size The size to use for resizing the image. * @returns {[number, number]} The target (width, height) dimension of the output image after resizing. */ get_resize_output_image_size(image, size) { // `size` comes in many forms, so we need to handle them all here: // 1. `size` is an integer, in which case we resize the image to be a square const [srcWidth, srcHeight] = image.size; let shortest_edge; let longest_edge; if (this.do_thumbnail) { // NOTE: custom logic for `Donut` models const { height, width } = size; shortest_edge = Math.min(height, width) } // Support both formats for backwards compatibility else if (Number.isInteger(size)) { shortest_edge = size; // @ts-expect-error TS2339 longest_edge = this.config.max_size ?? shortest_edge; } else if (size !== undefined) { // Extract known properties from `size` shortest_edge = size.shortest_edge; longest_edge = size.longest_edge; } // If `longest_edge` and `shortest_edge` are set, maintain aspect ratio and resize to `shortest_edge` // while keeping the largest dimension <= `longest_edge` if (shortest_edge !== undefined || longest_edge !== undefined) { // http://opensourcehacker.com/2011/12/01/calculate-aspect-ratio-conserving-resize-for-images-in-javascript/ // Try resize so that shortest edge is `shortest_edge` (target) const shortResizeFactor = shortest_edge === undefined ? 1 // If `shortest_edge` is not set, don't upscale : Math.max(shortest_edge / srcWidth, shortest_edge / srcHeight); const newWidth = srcWidth * shortResizeFactor; const newHeight = srcHeight * shortResizeFactor; // The new width and height might be greater than `longest_edge`, so // we downscale again to ensure the largest dimension is `longest_edge` const longResizeFactor = longest_edge === undefined ? 
1 // If `longest_edge` is not set, don't downscale : Math.min(longest_edge / newWidth, longest_edge / newHeight); // To avoid certain floating point precision issues, we round to 2 decimal places let finalWidth = Math.floor(Number((newWidth * longResizeFactor).toFixed(2))); let finalHeight = Math.floor(Number((newHeight * longResizeFactor).toFixed(2))); if (this.size_divisibility !== undefined) { [finalWidth, finalHeight] = enforce_size_divisibility([finalWidth, finalHeight], this.size_divisibility) } return [finalWidth, finalHeight]; } else if (size !== undefined && size.width !== undefined && size.height !== undefined) { // If `width` and `height` are set, resize to those dimensions let newWidth = size.width; let newHeight = size.height; // Custom for DPT models if (this.config.keep_aspect_ratio && this.config.ensure_multiple_of) { // determine new height and width let scale_height = newHeight / srcHeight; let scale_width = newWidth / srcWidth; // scale as little as possible if (Math.abs(1 - scale_width) < Math.abs(1 - scale_height)) { // fit width scale_height = scale_width; } else { // fit height scale_width = scale_height; } newHeight = constraint_to_multiple_of(scale_height * srcHeight, this.config.ensure_multiple_of); newWidth = constraint_to_multiple_of(scale_width * srcWidth, this.config.ensure_multiple_of); } return [newWidth, newHeight]; } else if (this.size_divisibility !== undefined) { return enforce_size_divisibility([srcWidth, srcHeight], this.size_divisibility); } else if (this.min_pixels !== undefined && this.max_pixels !== undefined) { // Custom resize logic for Qwen2-VL models // @ts-expect-error TS2339 const factor = this.config.patch_size * this.config.merge_size; return smart_resize(srcHeight, srcWidth, factor, this.min_pixels, this.max_pixels); } else { throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(size)}`); } } /** * Resizes the image. * @param {RawImage} image The image to resize. * @returns {Promise<RawImage>} The resized image. */ async resize(image) { const [newWidth, newHeight] = this.get_resize_output_image_size(image, this.size); return await image.resize(newWidth, newHeight, { // @ts-expect-error TS2322 resample: this.resample, }); } /** * @typedef {object} PreprocessedImage * @property {HeightWidth} original_size The original size of the image. * @property {HeightWidth} reshaped_input_size The reshaped input size of the image. * @property {Tensor} pixel_values The pixel values of the preprocessed image. */ /** * Preprocesses the given image. * * @param {RawImage} image The image to preprocess. * @param {Object} overrides The overrides for the preprocessing options. * @returns {Promise<PreprocessedImage>} The preprocessed image. */ async preprocess(image, { do_normalize = null, do_pad = null, do_convert_rgb = null, do_convert_grayscale = null, do_flip_channel_order = null, } = {}) { if (this.do_crop_margin) { // NOTE: Specific to nougat processors. This is done before resizing, // and can be interpreted as a pre-preprocessing step. image = await this.crop_margin(image); } const [srcWidth, srcHeight] = image.size; // original image size // Convert image to RGB if specified in config. if (do_convert_rgb ?? this.do_convert_rgb) { image = image.rgb(); } else if (do_convert_grayscale) { image = image.grayscale(); } // TODO: // For efficiency reasons, it might be best to merge the resize and center crop operations into one. 
// Resize all images
        if (this.do_resize) {
            image = await this.resize(image);
        }

        // Resize the image using thumbnail method.
        if (this.do_thumbnail) {
            // @ts-expect-error TS2345
            image = await this.thumbnail(image, this.size, this.resample);
        }

        if (this.do_center_crop) {

            let crop_width;
            let crop_height;
            if (Number.isInteger(this.crop_size)) {
                crop_width = this.crop_size;
                crop_height = this.crop_size;
            } else {
                crop_width = this.crop_size.width;
                crop_height = this.crop_size.height;
            }

            image = await image.center_crop(crop_width, crop_height);
        }

        /** @type {HeightWidth} */
        const reshaped_input_size = [image.height, image.width];

        // NOTE: All pixel-level manipulation (i.e., modifying `pixelData`)
        // occurs with data in the hwc format (height, width, channels),
        // to emulate the behavior of the original Python code (w/ numpy).
        /** @type {Float32Array} */
        let pixelData = Float32Array.from(image.data);
        let imgDims = [image.height, image.width, image.channels];

        if (this.do_rescale) {
            this.rescale(pixelData);
        }

        if (do_normalize ?? this.do_normalize) {
            let image_mean = this.image_mean;
            if (!Array.isArray(this.image_mean)) {
                image_mean = new Array(image.channels).fill(image_mean);
            }

            let image_std = this.image_std;
            if (!Array.isArray(this.image_std)) {
                image_std = new Array(image.channels).fill(image_std);
            }

            if (image_mean.length !== image.channels || image_std.length !== image.channels) {
                throw new Error(`When set to arrays, the length of \`image_mean\` (${image_mean.length}) and \`image_std\` (${image_std.length}) must match the number of channels in the image (${image.channels}).`);
            }

            for (let i = 0; i < pixelData.length; i += image.channels) {
                for (let j = 0; j < image.channels; ++j) {
                    pixelData[i + j] = (pixelData[i + j] - image_mean[j]) / image_std[j];
                }
            }
        }

        // do padding after rescaling/normalizing
        if (do_pad ?? this.do_pad) {
            if (this.pad_size) {
                const padded = this.pad_image(pixelData, [image.height, image.width, image.channels], this.pad_size);
                [pixelData, imgDims] = padded; // Update pixel data and image dimensions
            } else if (this.size_divisibility) {
                const [paddedWidth, paddedHeight] = enforce_size_divisibility([imgDims[1], imgDims[0]], this.size_divisibility);
                [pixelData, imgDims] = this.pad_image(pixelData, imgDims, { width: paddedWidth, height: paddedHeight });
            }
        }

        if (do_flip_channel_order ?? this.do_flip_channel_order) {
            if (imgDims[2] !== 3) {
                throw new Error('Flipping channel order is only supported for RGB images.');
            }

            // Convert RGB to BGR
            for (let i = 0; i < pixelData.length; i += 3) {
                const temp = pixelData[i];
                pixelData[i] = pixelData[i + 2];
                pixelData[i + 2] = temp;
            }
        }

        const pixel_values = new Tensor('float32', pixelData, imgDims)
            .permute(2, 0, 1); // convert to channel dimension format (hwc -> chw)

        return {
            original_size: [srcHeight, srcWidth],
            reshaped_input_size: reshaped_input_size,
            pixel_values,
        }
    }

    /**
     * Calls the feature extraction process on an array of images,
     * preprocesses each image, and concatenates the resulting
     * features into a single Tensor.
     * @param {RawImage[]} images The image(s) to extract features from.
     * @param {...any} args Additional arguments.
     * @returns {Promise<ImageProcessorResult>} An object containing the concatenated pixel values (and other metadata) of the preprocessed images.
*/ async _call(images, ...args) { if (!Array.isArray(images)) { images = [images]; } /** @type {PreprocessedImage[]} */ const imageData = await Promise.all(images.map(x => this.preprocess(x))); // Stack pixel values const pixel_values = stack(imageData.map(x => x.pixel_values), 0); return { pixel_values, // Original sizes of images original_sizes: imageData.map(x => x.original_size), // Reshaped sizes of images, before padding or cropping reshaped_input_sizes: imageData.map(x => x.reshaped_input_size), } } /** * Instantiate one of the processor classes of the library from a pretrained model. * * The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy) * property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) * * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a * user or organization name, like `dbmdz/bert-base-german-cased`. * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. * @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the processor. * * @returns {Promise<ImageProcessor>} A new instance of the Processor class. */ static async from_pretrained(pretrained_model_name_or_path, options={}) { const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options); return new this(preprocessorConfig); } }
transformers.js/src/base/image_processors_utils.js/0
{ "file_path": "transformers.js/src/base/image_processors_utils.js", "repo_id": "transformers.js", "token_count": 18547 }
313
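A minimal usage sketch for the generic `ImageProcessor` defined above. The checkpoint id and image URL are placeholders rather than values taken from this file, and in practice the class is normally instantiated indirectly through a model-specific subclass or the auto classes:

```javascript
// Illustrative only: load a preprocessor_config.json and run the
// resize -> rescale -> normalize -> stack pipeline on a single image.
import { ImageProcessor } from './src/base/image_processors_utils.js';
import { RawImage } from './src/utils/image.js';

const processor = await ImageProcessor.from_pretrained('Xenova/vit-base-patch16-224'); // placeholder model id
const image = await RawImage.read('https://example.com/cat.jpg');                      // placeholder URL

const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
console.log(pixel_values.dims); // channel-first batch, e.g. [1, 3, 224, 224]
```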
import { ImageProcessor, } from "../../base/image_processors_utils.js"; export class BitImageProcessor extends ImageProcessor { }
transformers.js/src/models/bit/image_processing_bit.js/0
{ "file_path": "transformers.js/src/models/bit/image_processing_bit.js", "repo_id": "transformers.js", "token_count": 43 }
314
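`BitImageProcessor` deliberately adds no logic of its own: it only gives the auto-loading machinery a concrete class to map `image_processor_type: "BitImageProcessor"` onto, so all behaviour comes from the base `ImageProcessor`. A hedged sketch with made-up config values (real values come from the checkpoint's `preprocessor_config.json`):

```javascript
import { BitImageProcessor } from './src/models/bit/image_processing_bit.js';

// Constructing the processor directly from an inline config, for illustration only.
const processor = new BitImageProcessor({
    do_resize: true,
    size: { shortest_edge: 224 },
    do_rescale: true,
    rescale_factor: 1 / 255,
    do_normalize: true,
    image_mean: [0.5, 0.5, 0.5],
    image_std: [0.5, 0.5, 0.5],
});

// `await processor(image)` would then return the same { pixel_values, ... }
// structure as any other ImageProcessor subclass.
```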
import { Processor } from "../../base/processing_utils.js"; import { AutoImageProcessor } from "../auto/image_processing_auto.js"; import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js"; import { AutoTokenizer } from "../../tokenizers.js"; import { RawImage } from "../../utils/image.js"; import { RawAudio } from "../../utils/audio.js"; export class Gemma3nProcessor extends Processor { static image_processor_class = AutoImageProcessor; static feature_extractor_class = AutoFeatureExtractor; static tokenizer_class = AutoTokenizer; static uses_processor_config = true; static uses_chat_template_file = true; constructor(config, components, chat_template) { super(config, components, chat_template); this.audio_seq_length = this.config.audio_seq_length; this.image_seq_length = this.config.image_seq_length; const { // Audio tokens audio_token_id, boa_token, audio_token, eoa_token, // Image tokens image_token_id, boi_token, image_token, eoi_token } = this.tokenizer.config; this.audio_token_id = audio_token_id this.boa_token = boa_token this.audio_token = audio_token const audio_tokens_expanded = audio_token.repeat(this.audio_seq_length); this.full_audio_sequence = `\n\n${boa_token}${audio_tokens_expanded}${eoa_token}\n\n` this.image_token_id = image_token_id this.boi_token = boi_token this.image_token = image_token const image_tokens_expanded = image_token.repeat(this.image_seq_length); this.full_image_sequence = `\n\n${boi_token}${image_tokens_expanded}${eoi_token}\n\n` } /** * * @param {string|string[]} text * @param {RawImage|RawImage[]|RawImage[][]} images * @param {RawAudio|RawAudio[]|RawAudio[][]} audio * @returns {Promise<any>} */ async _call(text, images = null, audio = null, options = {}) { if (typeof text === 'string') { text = [text]; } let audio_inputs; if (audio) { audio_inputs = await this.feature_extractor(audio, options); text = text.map(prompt => prompt.replaceAll(this.audio_token, this.full_audio_sequence)); } let image_inputs; if (images) { image_inputs = await this.image_processor(images, options); text = text.map(prompt => prompt.replaceAll(this.image_token, this.full_image_sequence)); } let text_inputs = this.tokenizer(text, options); return { ...text_inputs, ...image_inputs, ...audio_inputs, } } }
transformers.js/src/models/gemma3n/processing_gemma3n.js/0
{ "file_path": "transformers.js/src/models/gemma3n/processing_gemma3n.js", "repo_id": "transformers.js", "token_count": 1150 }
315
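The core of `Gemma3nProcessor._call` is plain string substitution: each image or audio placeholder in the prompt is replaced by the full `begin-token + placeholder * seq_length + end-token` block before tokenization. A self-contained sketch of that expansion, where the token strings and sequence length are made-up illustrative values rather than the model's real special tokens:

```javascript
// Mirrors the replaceAll logic in Gemma3nProcessor._call, with toy values.
const image_token = '<image_soft_token>';   // assumed placeholder token
const boi_token = '<start_of_image>';       // assumed begin-of-image token
const eoi_token = '<end_of_image>';         // assumed end-of-image token
const image_seq_length = 4;                 // real checkpoints use a much larger value

const full_image_sequence = `\n\n${boi_token}${image_token.repeat(image_seq_length)}${eoi_token}\n\n`;

const prompt = `Describe this image: ${image_token}`;
const expanded = prompt.replaceAll(image_token, full_image_sequence);
// `expanded` is what is actually passed to the tokenizer, alongside the
// pixel_values produced by the image processor.
console.log(expanded);
```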
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; import { Tensor } from '../../utils/tensor.js'; import { max, softmax } from '../../utils/maths.js'; export class PyAnnoteFeatureExtractor extends FeatureExtractor { /** * Asynchronously extracts features from a given audio using the provided configuration. * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. * @returns {Promise<{ input_values: Tensor; }>} The extracted input features. */ async _call(audio) { validate_audio_inputs(audio, 'PyAnnoteFeatureExtractor'); if (audio instanceof Float64Array) { audio = new Float32Array(audio); } const shape = [ 1, /* batch_size */ 1, /* num_channels */ audio.length, /* num_samples */ ]; return { input_values: new Tensor('float32', audio, shape), }; } /** * NOTE: Can return fractional values. `Math.ceil` will ensure correct value. * @param {number} samples The number of frames in the audio. * @returns {number} The number of frames in the audio. */ samples_to_frames(samples) { return ((samples - this.config.offset) / this.config.step); } /** * Post-processes the speaker diarization logits output by the model. * @param {import('../../utils/tensor.js').Tensor} logits The speaker diarization logits output by the model. * @param {number} num_samples Number of samples in the input audio. * @returns {Array<Array<{ id: number, start: number, end: number, confidence: number }>>} The post-processed speaker diarization results. */ post_process_speaker_diarization(logits, num_samples) { const ratio = ( num_samples / this.samples_to_frames(num_samples) ) / this.config.sampling_rate; const results = []; for (const scores of logits.tolist()) { const accumulated_segments = []; let current_speaker = -1; for (let i = 0; i < scores.length; ++i) { /** @type {number[]} */ const probabilities = softmax(scores[i]); const [score, id] = max(probabilities); const [start, end] = [i, i + 1]; if (id !== current_speaker) { // Speaker has changed current_speaker = id; accumulated_segments.push({ id, start, end, score }); } else { // Continue the current segment accumulated_segments.at(-1).end = end; accumulated_segments.at(-1).score += score; } } results.push(accumulated_segments.map( // Convert frame-space to time-space // and compute the confidence ({ id, start, end, score }) => ({ id, start: start * ratio, end: end * ratio, confidence: score / (end - start), }) )); } return results; } }
transformers.js/src/models/pyannote/feature_extraction_pyannote.js/0
{ "file_path": "transformers.js/src/models/pyannote/feature_extraction_pyannote.js", "repo_id": "transformers.js", "token_count": 1478 }
316
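A rough end-to-end sketch of how `PyAnnoteFeatureExtractor` is meant to be used for speaker diarization. The config values and the model invocation are assumptions for illustration; real values come from the checkpoint's `preprocessor_config.json`, and `logits` would be produced by the corresponding segmentation model:

```javascript
import { PyAnnoteFeatureExtractor } from './src/models/pyannote/feature_extraction_pyannote.js';

// Illustrative config; `offset`, `step` and `sampling_rate` are placeholders.
const extractor = new PyAnnoteFeatureExtractor({
    sampling_rate: 16000,
    offset: 0,
    step: 270,
});

const audio = new Float32Array(16000 * 10); // 10 seconds of (silent) 16 kHz audio
const { input_values } = await extractor(audio); // shape: [1, 1, num_samples]

// With model outputs available, the frame-level logits are converted into
// time-stamped speaker segments:
// const { logits } = await model({ input_values });
// const [segments] = extractor.post_process_speaker_diarization(logits, audio.length);
// segments: [{ id, start, end, confidence }, ...]
```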
import { ImageProcessor, } from "../../base/image_processors_utils.js"; export class Swin2SRImageProcessor extends ImageProcessor { pad_image(pixelData, imgDims, padSize, options = {}) { // NOTE: In this case, `padSize` represents the size of the sliding window for the local attention. // In other words, the image is padded so that its width and height are multiples of `padSize`. const [imageHeight, imageWidth, imageChannels] = imgDims; return super.pad_image(pixelData, imgDims, { // NOTE: For Swin2SR models, the original python implementation adds padding even when the image's width/height is already // a multiple of `pad_size`. However, this is most likely a bug (PR: https://github.com/mv-lab/swin2sr/pull/19). // For this reason, we only add padding when the image's width/height is not a multiple of `pad_size`. width: imageWidth + (padSize - imageWidth % padSize) % padSize, height: imageHeight + (padSize - imageHeight % padSize) % padSize, }, { mode: 'symmetric', center: false, constant_values: -1, ...options, }) } }
transformers.js/src/models/swin2sr/image_processing_swin2sr.js/0
{ "file_path": "transformers.js/src/models/swin2sr/image_processing_swin2sr.js", "repo_id": "transformers.js", "token_count": 462 }
317
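The Swin2SR processor only overrides the padding rule: each spatial dimension is padded up to the next multiple of the attention window size, and (unlike the original Python implementation) no padding is added when the dimension is already a multiple. A tiny arithmetic check of that rule, with made-up numbers:

```javascript
// Same expression as in Swin2SRImageProcessor.pad_image, applied to plain numbers.
const padSize = 8;
const padTo = (dim) => dim + (padSize - dim % padSize) % padSize;

console.log(padTo(125)); // 128 -> padded using 'symmetric' (reflect) mode
console.log(padTo(128)); // 128 -> already a multiple, so no padding is added
```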
/** * @file Pipelines provide a high-level, easy to use, API for running machine learning models. * * **Example:** Instantiate pipeline using the `pipeline` function. * ```javascript * import { pipeline } from '@huggingface/transformers'; * * const classifier = await pipeline('sentiment-analysis'); * const output = await classifier('I love transformers!'); * // [{'label': 'POSITIVE', 'score': 0.999817686}] * ``` * * @module pipelines */ import { AutoTokenizer, PreTrainedTokenizer, } from './tokenizers.js'; import { AutoModel, AutoModelForSequenceClassification, AutoModelForAudioClassification, AutoModelForTokenClassification, AutoModelForQuestionAnswering, AutoModelForMaskedLM, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForTextToWaveform, AutoModelForTextToSpectrogram, AutoModelForCTC, AutoModelForCausalLM, AutoModelForVision2Seq, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForSemanticSegmentation, AutoModelForUniversalSegmentation, AutoModelForObjectDetection, AutoModelForZeroShotObjectDetection, AutoModelForDocumentQuestionAnswering, AutoModelForImageToImage, AutoModelForDepthEstimation, AutoModelForImageFeatureExtraction, PreTrainedModel, } from './models.js'; import { AutoProcessor, } from './models/auto/processing_auto.js'; import { Processor, } from './base/processing_utils.js'; import { Callable, } from './utils/generic.js'; import { dispatchCallback, product, } from './utils/core.js'; import { softmax, max, round, } from './utils/maths.js'; import { read_audio, RawAudio } from './utils/audio.js'; import { Tensor, mean_pooling, interpolate_4d, quantize_embeddings, topk, } from './utils/tensor.js'; import { RawImage } from './utils/image.js'; /** * @typedef {string | RawImage | URL | Blob | HTMLCanvasElement | OffscreenCanvas} ImageInput * @typedef {ImageInput|ImageInput[]} ImagePipelineInputs */ /** * Prepare images for further tasks. * @param {ImagePipelineInputs} images images to prepare. * @returns {Promise<RawImage[]>} returns processed images. * @private */ async function prepareImages(images) { if (!Array.isArray(images)) { images = [images]; } // Possibly convert any non-images to images return await Promise.all(images.map(x => RawImage.read(x))); } /** * @typedef {string | URL | Float32Array | Float64Array} AudioInput * @typedef {AudioInput|AudioInput[]} AudioPipelineInputs */ /** * Prepare audios for further tasks. * @param {AudioPipelineInputs} audios audios to prepare. * @param {number} sampling_rate sampling rate of the audios. * @returns {Promise<Float32Array[]>} The preprocessed audio data. * @private */ async function prepareAudios(audios, sampling_rate) { if (!Array.isArray(audios)) { audios = [audios]; } return await Promise.all(audios.map(x => { if (typeof x === 'string' || x instanceof URL) { return read_audio(x, sampling_rate); } else if (x instanceof Float64Array) { return new Float32Array(x); } return x; })); } /** * @typedef {Object} BoundingBox * @property {number} xmin The minimum x coordinate of the bounding box. * @property {number} ymin The minimum y coordinate of the bounding box. * @property {number} xmax The maximum x coordinate of the bounding box. * @property {number} ymax The maximum y coordinate of the bounding box. */ /** * Helper function to convert list [xmin, xmax, ymin, ymax] into object { "xmin": xmin, ... } * @param {number[]} box The bounding box as a list. * @param {boolean} asInteger Whether to cast to integers. * @returns {BoundingBox} The bounding box as an object. 
* @private */ function get_bounding_box(box, asInteger) { if (asInteger) { box = box.map(x => x | 0); } const [xmin, ymin, xmax, ymax] = box; return { xmin, ymin, xmax, ymax }; } /** * @callback DisposeType Disposes the item. * @returns {Promise<void>} A promise that resolves when the item has been disposed. * * @typedef {Object} Disposable * @property {DisposeType} dispose A promise that resolves when the pipeline has been disposed. */ /** * The Pipeline class is the class from which all pipelines inherit. * Refer to this class for methods shared across different pipelines. */ export class Pipeline extends Callable { /** * Create a new Pipeline. * @param {Object} options An object containing the following properties: * @param {string} [options.task] The task of the pipeline. Useful for specifying subtasks. * @param {PreTrainedModel} [options.model] The model used by the pipeline. * @param {PreTrainedTokenizer} [options.tokenizer=null] The tokenizer used by the pipeline (if any). * @param {Processor} [options.processor=null] The processor used by the pipeline (if any). */ constructor({ task, model, tokenizer = null, processor = null }) { super(); this.task = task; this.model = model; this.tokenizer = tokenizer; this.processor = processor; } /** @type {DisposeType} */ async dispose() { await this.model.dispose(); } } /** * @typedef {Object} ModelTokenizerConstructorArgs * @property {string} task The task of the pipeline. Useful for specifying subtasks. * @property {PreTrainedModel} model The model used by the pipeline. * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline. * * @typedef {ModelTokenizerConstructorArgs} TextPipelineConstructorArgs An object used to instantiate a text-based pipeline. */ /** * @typedef {Object} ModelProcessorConstructorArgs * @property {string} task The task of the pipeline. Useful for specifying subtasks. * @property {PreTrainedModel} model The model used by the pipeline. * @property {Processor} processor The processor used by the pipeline. * * @typedef {ModelProcessorConstructorArgs} AudioPipelineConstructorArgs An object used to instantiate an audio-based pipeline. * @typedef {ModelProcessorConstructorArgs} ImagePipelineConstructorArgs An object used to instantiate an image-based pipeline. */ /** * @typedef {Object} ModelTokenizerProcessorConstructorArgs * @property {string} task The task of the pipeline. Useful for specifying subtasks. * @property {PreTrainedModel} model The model used by the pipeline. * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline. * @property {Processor} processor The processor used by the pipeline. * * @typedef {ModelTokenizerProcessorConstructorArgs} TextAudioPipelineConstructorArgs An object used to instantiate a text- and audio-based pipeline. * @typedef {ModelTokenizerProcessorConstructorArgs} TextImagePipelineConstructorArgs An object used to instantiate a text- and image-based pipeline. */ /** * @typedef {Object} TextClassificationSingle * @property {string} label The label predicted. * @property {number} score The corresponding probability. * @typedef {TextClassificationSingle[]} TextClassificationOutput * * @typedef {Object} TextClassificationPipelineOptions Parameters specific to text classification pipelines. * @property {number} [top_k=1] The number of top predictions to be returned. * * @callback TextClassificationPipelineCallback Classify the text(s) given as inputs. * @param {string|string[]} texts The input text(s) to be classified. 
* @param {TextClassificationPipelineOptions} [options] The options to use for text classification. * @returns {Promise<TextClassificationOutput|TextClassificationOutput[]>} An array or object containing the predicted labels and scores. * * @typedef {TextPipelineConstructorArgs & TextClassificationPipelineCallback & Disposable} TextClassificationPipelineType */ /** * Text classification pipeline using any `ModelForSequenceClassification`. * * **Example:** Sentiment-analysis w/ `Xenova/distilbert-base-uncased-finetuned-sst-2-english`. * ```javascript * const classifier = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english'); * const output = await classifier('I love transformers!'); * // [{ label: 'POSITIVE', score: 0.999788761138916 }] * ``` * * **Example:** Multilingual sentiment-analysis w/ `Xenova/bert-base-multilingual-uncased-sentiment` (and return top 5 classes). * ```javascript * const classifier = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment'); * const output = await classifier('Le meilleur film de tous les temps.', { top_k: 5 }); * // [ * // { label: '5 stars', score: 0.9610759615898132 }, * // { label: '4 stars', score: 0.03323351591825485 }, * // { label: '3 stars', score: 0.0036155181005597115 }, * // { label: '1 star', score: 0.0011325967498123646 }, * // { label: '2 stars', score: 0.0009423971059732139 } * // ] * ``` * * **Example:** Toxic comment classification w/ `Xenova/toxic-bert` (and return all classes). * ```javascript * const classifier = await pipeline('text-classification', 'Xenova/toxic-bert'); * const output = await classifier('I hate you!', { top_k: null }); * // [ * // { label: 'toxic', score: 0.9593140482902527 }, * // { label: 'insult', score: 0.16187334060668945 }, * // { label: 'obscene', score: 0.03452680632472038 }, * // { label: 'identity_hate', score: 0.0223250575363636 }, * // { label: 'threat', score: 0.019197041168808937 }, * // { label: 'severe_toxic', score: 0.005651099607348442 } * // ] * ``` */ export class TextClassificationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => TextClassificationPipelineType} */ (Pipeline)) { /** * Create a new TextClassificationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {TextClassificationPipelineCallback} */ async _call(texts, { top_k = 1 } = {}) { // Run tokenization const model_inputs = this.tokenizer(texts, { padding: true, truncation: true, }); // Run model const outputs = await this.model(model_inputs) // TODO: Use softmax tensor function const function_to_apply = // @ts-expect-error TS2339 this.model.config.problem_type === 'multi_label_classification' ? batch => batch.sigmoid() : batch => new Tensor( 'float32', softmax(batch.data), batch.dims, ); // single_label_classification (default) // @ts-expect-error TS2339 const id2label = this.model.config.id2label; const toReturn = []; for (const batch of outputs.logits) { const output = function_to_apply(batch); const scores = await topk(output, top_k); const values = scores[0].tolist(); const indices = scores[1].tolist(); const vals = indices.map((x, i) => ({ label: id2label ? id2label[x] : `LABEL_${x}`, score: values[i], })); if (top_k === 1) { toReturn.push(...vals); } else { toReturn.push(vals); } } return Array.isArray(texts) || top_k === 1 ? 
/** @type {TextClassificationOutput} */ (toReturn) : /** @type {TextClassificationOutput[]} */ (toReturn)[0]; } } /** * @typedef {Object} TokenClassificationSingle * @property {string} word The token/word classified. This is obtained by decoding the selected tokens. * @property {number} score The corresponding probability for `entity`. * @property {string} entity The entity predicted for that token/word. * @property {number} index The index of the corresponding token in the sentence. * @property {number} [start] The index of the start of the corresponding entity in the sentence. * @property {number} [end] The index of the end of the corresponding entity in the sentence. * @typedef {TokenClassificationSingle[]} TokenClassificationOutput * * @typedef {Object} TokenClassificationPipelineOptions Parameters specific to token classification pipelines. * @property {string[]} [ignore_labels] A list of labels to ignore. * * @callback TokenClassificationPipelineCallback Classify each token of the text(s) given as inputs. * @param {string|string[]} texts One or several texts (or one list of texts) for token classification. * @param {TokenClassificationPipelineOptions} [options] The options to use for token classification. * @returns {Promise<TokenClassificationOutput|TokenClassificationOutput[]>} The result. * * @typedef {TextPipelineConstructorArgs & TokenClassificationPipelineCallback & Disposable} TokenClassificationPipelineType */ /** * Named Entity Recognition pipeline using any `ModelForTokenClassification`. * * **Example:** Perform named entity recognition with `Xenova/bert-base-NER`. * ```javascript * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER'); * const output = await classifier('My name is Sarah and I live in London'); * // [ * // { entity: 'B-PER', score: 0.9980202913284302, index: 4, word: 'Sarah' }, * // { entity: 'B-LOC', score: 0.9994474053382874, index: 9, word: 'London' } * // ] * ``` * * **Example:** Perform named entity recognition with `Xenova/bert-base-NER` (and return all labels). * ```javascript * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER'); * const output = await classifier('Sarah lives in the United States of America', { ignore_labels: [] }); * // [ * // { entity: 'B-PER', score: 0.9966587424278259, index: 1, word: 'Sarah' }, * // { entity: 'O', score: 0.9987385869026184, index: 2, word: 'lives' }, * // { entity: 'O', score: 0.9990072846412659, index: 3, word: 'in' }, * // { entity: 'O', score: 0.9988298416137695, index: 4, word: 'the' }, * // { entity: 'B-LOC', score: 0.9995510578155518, index: 5, word: 'United' }, * // { entity: 'I-LOC', score: 0.9990395307540894, index: 6, word: 'States' }, * // { entity: 'I-LOC', score: 0.9986724853515625, index: 7, word: 'of' }, * // { entity: 'I-LOC', score: 0.9975294470787048, index: 8, word: 'America' } * // ] * ``` */ export class TokenClassificationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => TokenClassificationPipelineType} */ (Pipeline)) { /** * Create a new TokenClassificationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {TokenClassificationPipelineCallback} */ async _call(texts, { ignore_labels = ['O'], } = {}) { const isBatched = Array.isArray(texts); // Run tokenization const model_inputs = this.tokenizer(isBatched ? 
texts : [texts], { padding: true, truncation: true, }); // Run model const outputs = await this.model(model_inputs) const logits = outputs.logits; // @ts-expect-error TS2339 const id2label = this.model.config.id2label; const toReturn = []; for (let i = 0; i < logits.dims[0]; ++i) { const ids = model_inputs.input_ids[i]; const batch = logits[i]; // List of tokens that aren't ignored const tokens = []; for (let j = 0; j < batch.dims[0]; ++j) { const tokenData = batch[j]; const topScoreIndex = max(tokenData.data)[1]; const entity = id2label ? id2label[topScoreIndex] : `LABEL_${topScoreIndex}`; if (ignore_labels.includes(entity)) { // We predicted a token that should be ignored. So, we skip it. continue; } // TODO add option to keep special tokens? const word = this.tokenizer.decode([ids[j].item()], { skip_special_tokens: true }); if (word === '') { // Was a special token. So, we skip it. continue; } const scores = softmax(tokenData.data); tokens.push({ entity: entity, score: scores[topScoreIndex], index: j, word: word, // TODO: Add support for start and end // start: null, // end: null, }); } toReturn.push(tokens); } return isBatched ? toReturn : toReturn[0]; } } /** * @typedef {Object} QuestionAnsweringOutput * @property {number} score The probability associated to the answer. * @property {number} [start] The character start index of the answer (in the tokenized version of the input). * @property {number} [end] The character end index of the answer (in the tokenized version of the input). * @property {string} answer The answer to the question. * * @typedef {Object} QuestionAnsweringPipelineOptions Parameters specific to question answering pipelines. * @property {number} [top_k=1] The number of top answer predictions to be returned. * * @callback QuestionAnsweringPipelineCallback Answer the question(s) given as inputs by using the context(s). * @param {string|string[]} question One or several question(s) (must be used in conjunction with the `context` argument). * @param {string|string[]} context One or several context(s) associated with the question(s) (must be used in conjunction with the `question` argument). * @param {QuestionAnsweringPipelineOptions} [options] The options to use for question answering. * @returns {Promise<QuestionAnsweringOutput|QuestionAnsweringOutput[]>} An array or object containing the predicted answers and scores. * * @typedef {TextPipelineConstructorArgs & QuestionAnsweringPipelineCallback & Disposable} QuestionAnsweringPipelineType */ /** * Question Answering pipeline using any `ModelForQuestionAnswering`. * * **Example:** Run question answering with `Xenova/distilbert-base-uncased-distilled-squad`. * ```javascript * const answerer = await pipeline('question-answering', 'Xenova/distilbert-base-uncased-distilled-squad'); * const question = 'Who was Jim Henson?'; * const context = 'Jim Henson was a nice puppet.'; * const output = await answerer(question, context); * // { * // answer: "a nice puppet", * // score: 0.5768911502526741 * // } * ``` */ export class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => QuestionAnsweringPipelineType} */ (Pipeline)) { /** * Create a new QuestionAnsweringPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. 
*/ constructor(options) { super(options); } /** @type {QuestionAnsweringPipelineCallback} */ async _call(question, context, { top_k = 1 } = {}) { // Run tokenization const inputs = this.tokenizer(question, { text_pair: context, padding: true, truncation: true, }); const { start_logits, end_logits } = await this.model(inputs); const input_ids = inputs.input_ids.tolist(); const attention_mask = inputs.attention_mask.tolist(); // TODO: add support for `return_special_tokens_mask` const special_tokens = this.tokenizer.all_special_ids; /** @type {QuestionAnsweringOutput[]} */ const toReturn = []; for (let j = 0; j < start_logits.dims[0]; ++j) { const ids = input_ids[j]; const sepIndex = ids.findIndex(x => // We use == to match bigint with number // @ts-ignore x == this.tokenizer.sep_token_id ); const valid_mask = attention_mask[j].map((y, ix) => ( y == 1 && ( ix === 0 // is cls_token || ( ix > sepIndex && special_tokens.findIndex(x => x == ids[ix]) === -1 // token is not a special token (special_tokens_mask == 0) ) ) )); const start = start_logits[j].tolist(); const end = end_logits[j].tolist(); // Now, we mask out values that can't be in the answer // NOTE: We keep the cls_token unmasked (some models use it to indicate unanswerable questions) for (let i = 1; i < start.length; ++i) { if ( attention_mask[j] == 0 // is part of padding || i <= sepIndex // is before the sep_token || special_tokens.findIndex(x => x == ids[i]) !== -1 // Is a special token ) { // Make sure non-context indexes in the tensor cannot contribute to the softmax start[i] = -Infinity; end[i] = -Infinity; } } // Normalize logits and spans to retrieve the answer const start_scores = softmax(start).map((x, i) => [x, i]); const end_scores = softmax(end).map((x, i) => [x, i]); // Mask CLS start_scores[0][0] = 0; end_scores[0][0] = 0; // Generate all valid spans and select best ones const options = product(start_scores, end_scores) .filter(x => x[0][1] <= x[1][1]) .map(x => [x[0][1], x[1][1], x[0][0] * x[1][0]]) .sort((a, b) => b[2] - a[2]); for (let k = 0; k < Math.min(options.length, top_k); ++k) { const [start, end, score] = options[k]; const answer_tokens = ids.slice(start, end + 1) const answer = this.tokenizer.decode(answer_tokens, { skip_special_tokens: true, }); // TODO add start and end? // NOTE: HF returns character index toReturn.push({ answer, score }); } } // Mimic HF's return type based on top_k return (top_k === 1) ? toReturn[0] : toReturn; } } /** * @typedef {Object} FillMaskSingle * @property {string} sequence The corresponding input with the mask token prediction. * @property {number} score The corresponding probability. * @property {number} token The predicted token id (to replace the masked one). * @property {string} token_str The predicted token (to replace the masked one). * @typedef {FillMaskSingle[]} FillMaskOutput * * @typedef {Object} FillMaskPipelineOptions Parameters specific to fill mask pipelines. * @property {number} [top_k=5] When passed, overrides the number of predictions to return. * * @callback FillMaskPipelineCallback Fill the masked token in the text(s) given as inputs. * @param {string|string[]} texts One or several texts (or one list of prompts) with masked tokens. * @param {FillMaskPipelineOptions} [options] The options to use for masked language modelling. 
* @returns {Promise<FillMaskOutput|FillMaskOutput[]>} An array of objects containing the score, predicted token, predicted token string, * and the sequence with the predicted token filled in, or an array of such arrays (one for each input text). * If only one input text is given, the output will be an array of objects. * @throws {Error} When the mask token is not found in the input text. * * @typedef {TextPipelineConstructorArgs & FillMaskPipelineCallback & Disposable} FillMaskPipelineType */ /** * Masked language modeling prediction pipeline using any `ModelWithLMHead`. * * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-uncased`. * ```javascript * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased'); * const output = await unmasker('The goal of life is [MASK].'); * // [ * // { token_str: 'survival', score: 0.06137419492006302, token: 8115, sequence: 'The goal of life is survival.' }, * // { token_str: 'love', score: 0.03902450203895569, token: 1567, sequence: 'The goal of life is love.' }, * // { token_str: 'happiness', score: 0.03253183513879776, token: 9266, sequence: 'The goal of life is happiness.' }, * // { token_str: 'freedom', score: 0.018736306577920914, token: 4438, sequence: 'The goal of life is freedom.' }, * // { token_str: 'life', score: 0.01859794743359089, token: 1297, sequence: 'The goal of life is life.' } * // ] * ``` * * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-cased` (and return top result). * ```javascript * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased'); * const output = await unmasker('The Milky Way is a [MASK] galaxy.', { top_k: 1 }); * // [{ token_str: 'spiral', score: 0.6299987435340881, token: 14061, sequence: 'The Milky Way is a spiral galaxy.' }] * ``` */ export class FillMaskPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => FillMaskPipelineType} */ (Pipeline)) { /** * Create a new FillMaskPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {FillMaskPipelineCallback} */ async _call(texts, { top_k = 5 } = {}) { // Run tokenization const model_inputs = this.tokenizer(texts, { padding: true, truncation: true, }); // Run model const { logits } = await this.model(model_inputs) const toReturn = []; /** @type {bigint[][]} */ const input_ids = model_inputs.input_ids.tolist(); for (let i = 0; i < input_ids.length; ++i) { const ids = input_ids[i]; const mask_token_index = ids.findIndex(x => // We use == to match bigint with number // @ts-ignore x == this.tokenizer.mask_token_id ); if (mask_token_index === -1) { throw Error(`Mask token (${this.tokenizer.mask_token}) not found in text.`) } const itemLogits = logits[i][mask_token_index]; const scores = await topk(new Tensor( 'float32', softmax(itemLogits.data), itemLogits.dims, ), top_k); const values = scores[0].tolist(); const indices = scores[1].tolist(); toReturn.push(indices.map((x, i) => { const sequence = ids.slice(); sequence[mask_token_index] = x; return { score: values[i], token: Number(x), token_str: this.tokenizer.decode([x]), sequence: this.tokenizer.decode(sequence, { skip_special_tokens: true }), } })); } return Array.isArray(texts) ? toReturn : toReturn[0]; } } /** * @typedef {Object} Text2TextGenerationSingle * @property {string} generated_text The generated text. 
* @typedef {Text2TextGenerationSingle[]} Text2TextGenerationOutput * * @callback Text2TextGenerationPipelineCallback Generate the output text(s) using text(s) given as inputs. * @param {string|string[]} texts Input text for the encoder. * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<Text2TextGenerationOutput|Text2TextGenerationOutput[]>} * * @typedef {TextPipelineConstructorArgs & Text2TextGenerationPipelineCallback & Disposable} Text2TextGenerationPipelineType */ /** * Text2TextGenerationPipeline class for generating text using a model that performs text-to-text generation tasks. * * **Example:** Text-to-text generation w/ `Xenova/LaMini-Flan-T5-783M`. * ```javascript * const generator = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-783M'); * const output = await generator('how can I become more healthy?', { * max_new_tokens: 100, * }); * // [{ generated_text: "To become more healthy, you can: 1. Eat a balanced diet with plenty of fruits, vegetables, whole grains, lean proteins, and healthy fats. 2. Stay hydrated by drinking plenty of water. 3. Get enough sleep and manage stress levels. 4. Avoid smoking and excessive alcohol consumption. 5. Regularly exercise and maintain a healthy weight. 6. Practice good hygiene and sanitation. 7. Seek medical attention if you experience any health issues." }] * ``` */ export class Text2TextGenerationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => Text2TextGenerationPipelineType} */ (Pipeline)) { /** @type {'generated_text'} */ _key = 'generated_text'; /** * Create a new Text2TextGenerationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {Text2TextGenerationPipelineCallback} */ async _call(texts, generate_kwargs = {}) { if (!Array.isArray(texts)) { texts = [texts]; } // Add global prefix, if present // @ts-expect-error TS2339 if (this.model.config.prefix) { // @ts-expect-error TS2339 texts = texts.map(x => this.model.config.prefix + x) } // Handle task specific params: // @ts-expect-error TS2339 const task_specific_params = this.model.config.task_specific_params if (task_specific_params && task_specific_params[this.task]) { // Add prefixes, if present if (task_specific_params[this.task].prefix) { texts = texts.map(x => task_specific_params[this.task].prefix + x) } // TODO update generation config } const tokenizer = this.tokenizer; const tokenizer_options = { padding: true, truncation: true, } let inputs; if (this instanceof TranslationPipeline && '_build_translation_inputs' in tokenizer) { // TODO: move to Translation pipeline? // Currently put here to avoid code duplication // @ts-ignore inputs = tokenizer._build_translation_inputs(texts, tokenizer_options, generate_kwargs); } else { inputs = tokenizer(texts, tokenizer_options); } const outputTokenIds = await this.model.generate({ ...inputs, ...generate_kwargs }); return tokenizer.batch_decode(/** @type {Tensor} */(outputTokenIds), { skip_special_tokens: true, }).map(text => ({ [this._key]: text })); } } /** * @typedef {Object} SummarizationSingle * @property {string} summary_text The summary text. * @typedef {SummarizationSingle[]} SummarizationOutput * * @callback SummarizationPipelineCallback Summarize the text(s) given as inputs. 
* @param {string|string[]} texts One or several articles (or one list of articles) to summarize. * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<SummarizationOutput|SummarizationOutput[]>} * * @typedef {TextPipelineConstructorArgs & SummarizationPipelineCallback & Disposable} SummarizationPipelineType */ /** * A pipeline for summarization tasks, inheriting from Text2TextGenerationPipeline. * * **Example:** Summarization w/ `Xenova/distilbart-cnn-6-6`. * ```javascript * const generator = await pipeline('summarization', 'Xenova/distilbart-cnn-6-6'); * const text = 'The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, ' + * 'and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. ' + * 'During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest ' + * 'man-made structure in the world, a title it held for 41 years until the Chrysler Building in New ' + * 'York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to ' + * 'the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the ' + * 'Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second ' + * 'tallest free-standing structure in France after the Millau Viaduct.'; * const output = await generator(text, { * max_new_tokens: 100, * }); * // [{ summary_text: ' The Eiffel Tower is about the same height as an 81-storey building and the tallest structure in Paris. It is the second tallest free-standing structure in France after the Millau Viaduct.' }] * ``` */ export class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => SummarizationPipelineType} */ (/** @type {any} */ (Text2TextGenerationPipeline))) { /** @type {'summary_text'} */ _key = 'summary_text'; /** * Create a new SummarizationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } } /** * @typedef {Object} TranslationSingle * @property {string} translation_text The translated text. * @typedef {TranslationSingle[]} TranslationOutput * * @callback TranslationPipelineCallback Translate the text(s) given as inputs. * @param {string|string[]} texts Texts to be translated. * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<TranslationOutput|TranslationOutput[]>} * * @typedef {TextPipelineConstructorArgs & TranslationPipelineCallback & Disposable} TranslationPipelineType */ /** * Translates text from one language to another. * * **Example:** Multilingual translation w/ `Xenova/nllb-200-distilled-600M`. * * See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) * for the full list of languages and their corresponding codes. * * ```javascript * const translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M'); * const output = await translator('जीवन एक चॉकलेट बॉक्स की तरह है।', { * src_lang: 'hin_Deva', // Hindi * tgt_lang: 'fra_Latn', // French * }); * // [{ translation_text: 'La vie est comme une boîte à chocolat.' }] * ``` * * **Example:** Multilingual translation w/ `Xenova/m2m100_418M`. 
* * See [here](https://huggingface.co/facebook/m2m100_418M#languages-covered) * for the full list of languages and their corresponding codes. * * ```javascript * const translator = await pipeline('translation', 'Xenova/m2m100_418M'); * const output = await translator('生活就像一盒巧克力。', { * src_lang: 'zh', // Chinese * tgt_lang: 'en', // English * }); * // [{ translation_text: 'Life is like a box of chocolate.' }] * ``` * * **Example:** Multilingual translation w/ `Xenova/mbart-large-50-many-to-many-mmt`. * * See [here](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt#languages-covered) * for the full list of languages and their corresponding codes. * * ```javascript * const translator = await pipeline('translation', 'Xenova/mbart-large-50-many-to-many-mmt'); * const output = await translator('संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है', { * src_lang: 'hi_IN', // Hindi * tgt_lang: 'fr_XX', // French * }); * // [{ translation_text: 'Le chef des Nations affirme qu 'il n 'y a military solution in Syria.' }] * ``` */ export class TranslationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => TranslationPipelineType} */ (/** @type {any} */ (Text2TextGenerationPipeline))) { /** @type {'translation_text'} */ _key = 'translation_text'; /** * Create a new TranslationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } } function isChat(x) { return Array.isArray(x) && x.every(x => 'role' in x && 'content' in x); } /** * @typedef {import('./tokenizers.js').Message[]} Chat * * @typedef {Object} TextGenerationSingle * @property {string|Chat} generated_text The generated text. * @typedef {TextGenerationSingle[]} TextGenerationOutput * * @typedef {Object} TextGenerationSpecificParams Parameters specific to text-generation pipelines. * @property {boolean} [add_special_tokens] Whether or not to add special tokens when tokenizing the sequences. * @property {boolean} [return_full_text=true] If set to `false` only added text is returned, otherwise the full text is returned. * @typedef {import('./generation/configuration_utils.js').GenerationConfig & TextGenerationSpecificParams} TextGenerationConfig * * @callback TextGenerationPipelineCallback Complete the prompt(s) given as inputs. * @param {string|string[]|Chat|Chat[]} texts One or several prompts (or one list of prompts) to complete. * @param {Partial<TextGenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<TextGenerationOutput|TextGenerationOutput[]>} An array or object containing the generated texts. * * @typedef {TextPipelineConstructorArgs & TextGenerationPipelineCallback & Disposable} TextGenerationPipelineType */ /** * Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`. * This pipeline predicts the words that will follow a specified text prompt. * NOTE: For the full list of generation parameters, see [`GenerationConfig`](./utils/generation#module_utils/generation.GenerationConfig). * * **Example:** Text generation with `Xenova/distilgpt2` (default settings). * ```javascript * const generator = await pipeline('text-generation', 'Xenova/distilgpt2'); * const text = 'I enjoy walking with my cute dog,'; * const output = await generator(text); * // [{ generated_text: "I enjoy walking with my cute dog, and I love to play with the other dogs." 
}] * ``` * * **Example:** Text generation with `Xenova/distilgpt2` (custom settings). * ```javascript * const generator = await pipeline('text-generation', 'Xenova/distilgpt2'); * const text = 'Once upon a time, there was'; * const output = await generator(text, { * temperature: 2, * max_new_tokens: 10, * repetition_penalty: 1.5, * no_repeat_ngram_size: 2, * num_beams: 2, * num_return_sequences: 2, * }); * // [{ * // "generated_text": "Once upon a time, there was an abundance of information about the history and activities that" * // }, { * // "generated_text": "Once upon a time, there was an abundance of information about the most important and influential" * // }] * ``` * * **Example:** Run code generation with `Xenova/codegen-350M-mono`. * ```javascript * const generator = await pipeline('text-generation', 'Xenova/codegen-350M-mono'); * const text = 'def fib(n):'; * const output = await generator(text, { * max_new_tokens: 44, * }); * // [{ * // generated_text: 'def fib(n):\n' + * // ' if n == 0:\n' + * // ' return 0\n' + * // ' elif n == 1:\n' + * // ' return 1\n' + * // ' else:\n' + * // ' return fib(n-1) + fib(n-2)\n' * // }] * ``` */ export class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => TextGenerationPipelineType} */ (Pipeline)) { /** * Create a new TextGenerationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {TextGenerationPipelineCallback} */ async _call(texts, generate_kwargs = {}) { let isBatched = false; let isChatInput = false; // By default, do not add special tokens, unless the tokenizer specifies otherwise let add_special_tokens = generate_kwargs.add_special_tokens ?? (this.tokenizer.add_bos_token || this.tokenizer.add_eos_token) ?? false; // Normalize inputs /** @type {string[]} */ let inputs; if (typeof texts === 'string') { inputs = texts = [texts]; } else if (Array.isArray(texts) && texts.every(x => typeof x === 'string')) { isBatched = true; inputs = /** @type {string[]} */(texts); } else { if (isChat(texts)) { texts = [/** @type {Chat} */(texts)]; } else if (Array.isArray(texts) && texts.every(isChat)) { isBatched = true; } else { throw new Error('Input must be a string, an array of strings, a Chat, or an array of Chats'); } isChatInput = true; // If the input is a chat, we need to apply the chat template inputs = /** @type {string[]} */(/** @type {Chat[]} */ (texts).map( x => this.tokenizer.apply_chat_template(x, { tokenize: false, add_generation_prompt: true, }) )); add_special_tokens = false; // Chat template handles this already } // By default, return full text const return_full_text = isChatInput ? false : generate_kwargs.return_full_text ?? 
true; this.tokenizer.padding_side = 'left'; const text_inputs = this.tokenizer(inputs, { add_special_tokens, padding: true, truncation: true, }); const outputTokenIds = /** @type {Tensor} */(await this.model.generate({ ...text_inputs, ...generate_kwargs })); const decoded = this.tokenizer.batch_decode(outputTokenIds, { skip_special_tokens: true, }); let promptLengths; if (!return_full_text && text_inputs.input_ids.dims.at(-1) > 0) { promptLengths = this.tokenizer.batch_decode(text_inputs.input_ids, { skip_special_tokens: true, }).map(x => x.length); } /** @type {TextGenerationOutput[]} */ const toReturn = Array.from({ length: texts.length }, _ => []); for (let i = 0; i < decoded.length; ++i) { const textIndex = Math.floor(i / outputTokenIds.dims[0] * texts.length); if (promptLengths) { // Trim the decoded text to only include the generated part decoded[i] = decoded[i].slice(promptLengths[textIndex]); } toReturn[textIndex].push({ generated_text: isChatInput ? [ ...((/** @type {Chat[]} */(texts)[textIndex])), { role: 'assistant', content: decoded[i] }, ] : decoded[i] }); } return (!isBatched && toReturn.length === 1) ? toReturn[0] : toReturn; } } /** * @typedef {Object} ZeroShotClassificationOutput * @property {string} sequence The sequence for which this is the output. * @property {string[]} labels The labels sorted by order of likelihood. * @property {number[]} scores The probabilities for each of the labels. * * @typedef {Object} ZeroShotClassificationPipelineOptions Parameters specific to zero-shot classification pipelines. * @property {string} [hypothesis_template="This example is {}."] The template used to turn each * candidate label into an NLI-style hypothesis. The candidate label will replace the {} placeholder. * @property {boolean} [multi_label=false] Whether or not multiple candidate labels can be true. * If `false`, the scores are normalized such that the sum of the label likelihoods for each sequence * is 1. If `true`, the labels are considered independent and probabilities are normalized for each * candidate by doing a softmax of the entailment score vs. the contradiction score. * * @callback ZeroShotClassificationPipelineCallback Classify the sequence(s) given as inputs. * @param {string|string[]} texts The sequence(s) to classify, will be truncated if the model input is too large. * @param {string|string[]} candidate_labels The set of possible class labels to classify each sequence into. * Can be a single label, a string of comma-separated labels, or a list of labels. * @param {ZeroShotClassificationPipelineOptions} [options] The options to use for zero-shot classification. * @returns {Promise<ZeroShotClassificationOutput|ZeroShotClassificationOutput[]>} An array or object containing the predicted labels and scores. * * @typedef {TextPipelineConstructorArgs & ZeroShotClassificationPipelineCallback & Disposable} ZeroShotClassificationPipelineType */ /** * NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` * trained on NLI (natural language inference) tasks. Equivalent of `text-classification` * pipelines, but these models don't require a hardcoded number of potential classes, they * can be chosen at runtime. It usually means it's slower but it is **much** more flexible. * * **Example:** Zero shot classification with `Xenova/mobilebert-uncased-mnli`. 
* ```javascript * const classifier = await pipeline('zero-shot-classification', 'Xenova/mobilebert-uncased-mnli'); * const text = 'Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.'; * const labels = [ 'mobile', 'billing', 'website', 'account access' ]; * const output = await classifier(text, labels); * // { * // sequence: 'Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', * // labels: [ 'mobile', 'website', 'billing', 'account access' ], * // scores: [ 0.5562091040482018, 0.1843621307860853, 0.13942646639336376, 0.12000229877234923 ] * // } * ``` * * **Example:** Zero shot classification with `Xenova/nli-deberta-v3-xsmall` (multi-label). * ```javascript * const classifier = await pipeline('zero-shot-classification', 'Xenova/nli-deberta-v3-xsmall'); * const text = 'I have a problem with my iphone that needs to be resolved asap!'; * const labels = [ 'urgent', 'not urgent', 'phone', 'tablet', 'computer' ]; * const output = await classifier(text, labels, { multi_label: true }); * // { * // sequence: 'I have a problem with my iphone that needs to be resolved asap!', * // labels: [ 'urgent', 'phone', 'computer', 'tablet', 'not urgent' ], * // scores: [ 0.9958870956360275, 0.9923963400697035, 0.002333537946160235, 0.0015134138567598765, 0.0010699384208377163 ] * // } * ``` */ export class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => ZeroShotClassificationPipelineType} */ (Pipeline)) { /** * Create a new ZeroShotClassificationPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); // Use model config to get label2id mapping this.label2id = Object.fromEntries( Object.entries((/** @type {any} */(this).model).config.label2id).map( ([k, v]) => [k.toLowerCase(), v] ) ); this.entailment_id = this.label2id['entailment']; if (this.entailment_id === undefined) { console.warn("Could not find 'entailment' in label2id mapping. Using 2 as entailment_id."); this.entailment_id = 2; } this.contradiction_id = this.label2id['contradiction'] ?? this.label2id['not_entailment']; if (this.contradiction_id === undefined) { console.warn("Could not find 'contradiction' in label2id mapping. Using 0 as contradiction_id."); this.contradiction_id = 0; } } /** @type {ZeroShotClassificationPipelineCallback} */ async _call(texts, candidate_labels, { hypothesis_template = "This example is {}.", multi_label = false, } = {}) { const isBatched = Array.isArray(texts); if (!isBatched) { texts = [/** @type {string} */ (texts)]; } if (!Array.isArray(candidate_labels)) { candidate_labels = [candidate_labels]; } // Insert labels into hypothesis template const hypotheses = candidate_labels.map( x => hypothesis_template.replace('{}', x) ); // How to perform the softmax over the logits: // - true: softmax over the entailment vs. 
contradiction dim for each label independently // - false: softmax the "entailment" logits over all candidate labels const softmaxEach = multi_label || candidate_labels.length === 1; /** @type {ZeroShotClassificationOutput[]} */ const toReturn = []; for (const premise of texts) { const entails_logits = []; for (const hypothesis of hypotheses) { const inputs = this.tokenizer(premise, { text_pair: hypothesis, padding: true, truncation: true, }) const outputs = await this.model(inputs) if (softmaxEach) { entails_logits.push([ outputs.logits.data[this.contradiction_id], outputs.logits.data[this.entailment_id] ]) } else { entails_logits.push(outputs.logits.data[this.entailment_id]) } } /** @type {number[]} */ const scores = softmaxEach ? entails_logits.map(x => softmax(x)[1]) : softmax(entails_logits); // Sort by scores (desc) and return scores with indices const scores_sorted = scores .map((x, i) => [x, i]) .sort((a, b) => (b[0] - a[0])); toReturn.push({ sequence: premise, labels: scores_sorted.map(x => candidate_labels[x[1]]), scores: scores_sorted.map(x => x[0]), }); } return isBatched ? toReturn : toReturn[0]; } } /** * @typedef {Object} FeatureExtractionPipelineOptions Parameters specific to feature extraction pipelines. * @property {'none'|'mean'|'cls'|'first_token'|'eos'|'last_token'} [pooling="none"] The pooling method to use. * @property {boolean} [normalize=false] Whether or not to normalize the embeddings in the last dimension. * @property {boolean} [quantize=false] Whether or not to quantize the embeddings. * @property {'binary'|'ubinary'} [precision='binary'] The precision to use for quantization. * * @callback FeatureExtractionPipelineCallback Extract the features of the input(s). * @param {string|string[]} texts One or several texts (or one list of texts) to get the features of. * @param {FeatureExtractionPipelineOptions} [options] The options to use for feature extraction. * @returns {Promise<Tensor>} The features computed by the model. * * @typedef {TextPipelineConstructorArgs & FeatureExtractionPipelineCallback & Disposable} FeatureExtractionPipelineType */ /** * Feature extraction pipeline using no model head. This pipeline extracts the hidden * states from the base transformer, which can be used as features in downstream tasks. * * **Example:** Run feature extraction with `bert-base-uncased` (without pooling/normalization). * ```javascript * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' }); * const output = await extractor('This is a simple test.'); * // Tensor { * // type: 'float32', * // data: Float32Array [0.05939924716949463, 0.021655935794115067, ...], * // dims: [1, 8, 768] * // } * ``` * * **Example:** Run feature extraction with `bert-base-uncased` (with pooling/normalization). * ```javascript * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' }); * const output = await extractor('This is a simple test.', { pooling: 'mean', normalize: true }); * // Tensor { * // type: 'float32', * // data: Float32Array [0.03373778983950615, -0.010106077417731285, ...], * // dims: [1, 768] * // } * ``` * * **Example:** Calculating embeddings with `sentence-transformers` models. 
* ```javascript * const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2'); * const output = await extractor('This is a simple test.', { pooling: 'mean', normalize: true }); * // Tensor { * // type: 'float32', * // data: Float32Array [0.09094982594251633, -0.014774246141314507, ...], * // dims: [1, 384] * // } * ``` * **Example:** Calculating binary embeddings with `sentence-transformers` models. * ```javascript * const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2'); * const output = await extractor('This is a simple test.', { pooling: 'mean', quantize: true, precision: 'binary' }); * // Tensor { * // type: 'int8', * // data: Int8Array [49, 108, 24, ...], * // dims: [1, 48] * // } * ``` */ export class FeatureExtractionPipeline extends (/** @type {new (options: TextPipelineConstructorArgs) => FeatureExtractionPipelineType} */ (Pipeline)) { /** * Create a new FeatureExtractionPipeline. * @param {TextPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {FeatureExtractionPipelineCallback} */ async _call(texts, { pooling = /** @type {'none'} */('none'), normalize = false, quantize = false, precision = /** @type {'binary'} */('binary'), } = {}) { // Run tokenization const model_inputs = this.tokenizer(texts, { padding: true, truncation: true, }); // Run model const outputs = await this.model(model_inputs) // TODO: Provide warning to the user that they might be using model which was not exported // specifically for feature extraction // console.log(this.model.config) // console.log(outputs) /** @type {Tensor} */ let result = outputs.last_hidden_state ?? outputs.logits ?? outputs.token_embeddings; switch (pooling) { case 'none': // Skip pooling break; case 'mean': result = mean_pooling(result, model_inputs.attention_mask); break; case 'first_token': case 'cls': result = result.slice(null, 0); break; case 'last_token': case 'eos': result = result.slice(null, -1); break; default: throw Error(`Pooling method '${pooling}' not supported.`); } if (normalize) { result = result.normalize(2, -1); } if (quantize) { result = quantize_embeddings(result, precision); } return result; } } /** * @typedef {Object} ImageFeatureExtractionPipelineOptions Parameters specific to image feature extraction pipelines. * @property {boolean} [pool=null] Whether or not to return the pooled output. If set to `false`, the model will return the raw hidden states. * * @callback ImageFeatureExtractionPipelineCallback Extract the features of the input(s). * @param {ImagePipelineInputs} images One or several images (or one list of images) to get the features of. * @param {ImageFeatureExtractionPipelineOptions} [options] The options to use for image feature extraction. * @returns {Promise<Tensor>} The image features computed by the model. * * @typedef {ImagePipelineConstructorArgs & ImageFeatureExtractionPipelineCallback & Disposable} ImageFeatureExtractionPipelineType */ /** * Image feature extraction pipeline using no model head. This pipeline extracts the hidden * states from the base transformer, which can be used as features in downstream tasks. * * **Example:** Perform image feature extraction with `Xenova/vit-base-patch16-224-in21k`. 
* ```javascript * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/vit-base-patch16-224-in21k'); * const url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png'; * const features = await image_feature_extractor(url); * // Tensor { * // dims: [ 1, 197, 768 ], * // type: 'float32', * // data: Float32Array(151296) [ ... ], * // size: 151296 * // } * ``` * * **Example:** Compute image embeddings with `Xenova/clip-vit-base-patch32`. * ```javascript * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/clip-vit-base-patch32'); * const url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png'; * const features = await image_feature_extractor(url); * // Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [ ... ], * // size: 512 * // } * ``` */ export class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageFeatureExtractionPipelineType} */ (Pipeline)) { /** * Create a new ImageFeatureExtractionPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ImageFeatureExtractionPipelineCallback} */ async _call(images, { pool = null, } = {}) { const preparedImages = await prepareImages(images); const { pixel_values } = await this.processor(preparedImages); const outputs = await this.model({ pixel_values }); /** @type {Tensor} */ let result; if (pool) { if (!('pooler_output' in outputs)) { throw Error(`No pooled output was returned. Make sure the model has a 'pooler' layer when using the 'pool' option.`); } result = outputs.pooler_output; } else { result = outputs.last_hidden_state ?? outputs.logits ?? outputs.image_embeds; } return result; } } // TODO // export class SentenceSimilarityPipeline extends Pipeline { // } /** * @typedef {Object} AudioClassificationSingle * @property {string} label The label predicted. * @property {number} score The corresponding probability. * @typedef {AudioClassificationSingle[]} AudioClassificationOutput * * @typedef {Object} AudioClassificationPipelineOptions Parameters specific to audio classification pipelines. * @property {number} [top_k=5] The number of top labels that will be returned by the pipeline. * If the provided number is `null` or higher than the number of labels available in the model configuration, * it will default to the number of labels. * * @callback AudioClassificationPipelineCallback Classify the sequence(s) given as inputs. * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. The input is either: * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate * to get the waveform using the [`AudioContext`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) API. * If `AudioContext` is not available, you should pass the raw waveform in as a Float32Array of shape `(n, )`. * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done). * @param {AudioClassificationPipelineOptions} [options] The options to use for audio classification. * @returns {Promise<AudioClassificationOutput|AudioClassificationOutput[]>} An array or object containing the predicted labels and scores. 
* * @typedef {AudioPipelineConstructorArgs & AudioClassificationPipelineCallback & Disposable} AudioClassificationPipelineType */ /** * Audio classification pipeline using any `AutoModelForAudioClassification`. * This pipeline predicts the class of a raw waveform or an audio file. * * **Example:** Perform audio classification with `Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech`. * ```javascript * const classifier = await pipeline('audio-classification', 'Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; * const output = await classifier(url); * // [ * // { label: 'male', score: 0.9981542229652405 }, * // { label: 'female', score: 0.001845747814513743 } * // ] * ``` * * **Example:** Perform audio classification with `Xenova/ast-finetuned-audioset-10-10-0.4593` and return top 4 results. * ```javascript * const classifier = await pipeline('audio-classification', 'Xenova/ast-finetuned-audioset-10-10-0.4593'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cat_meow.wav'; * const output = await classifier(url, { top_k: 4 }); * // [ * // { label: 'Meow', score: 0.5617874264717102 }, * // { label: 'Cat', score: 0.22365376353263855 }, * // { label: 'Domestic animals, pets', score: 0.1141069084405899 }, * // { label: 'Animal', score: 0.08985692262649536 }, * // ] * ``` */ export class AudioClassificationPipeline extends (/** @type {new (options: AudioPipelineConstructorArgs) => AudioClassificationPipelineType} */ (Pipeline)) { /** * Create a new AudioClassificationPipeline. * @param {AudioPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {AudioClassificationPipelineCallback} */ async _call(audio, { top_k = 5 } = {}) { const sampling_rate = this.processor.feature_extractor.config.sampling_rate; const preparedAudios = await prepareAudios(audio, sampling_rate); // @ts-expect-error TS2339 const id2label = this.model.config.id2label; const toReturn = []; for (const aud of preparedAudios) { const inputs = await this.processor(aud); const output = await this.model(inputs); const logits = output.logits[0]; const scores = await topk(new Tensor( 'float32', softmax(logits.data), logits.dims, ), top_k); const values = scores[0].tolist(); const indices = scores[1].tolist(); const vals = indices.map((x, i) => ({ label: /** @type {string} */ (id2label ? id2label[x] : `LABEL_${x}`), score: /** @type {number} */ (values[i]), })); toReturn.push(vals); }; return Array.isArray(audio) ? toReturn : toReturn[0]; } } /** * @typedef {Object} ZeroShotAudioClassificationOutput * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`. * @property {number} score The score attributed by the model for that label (between 0 and 1). * * @typedef {Object} ZeroShotAudioClassificationPipelineOptions Parameters specific to zero-shot audio classification pipelines. * @property {string} [hypothesis_template="This is a sound of {}."] The sentence used in conjunction with `candidate_labels` * to attempt the audio classification by replacing the placeholder with the candidate_labels. * Then likelihood is estimated by using `logits_per_audio`. * * @callback ZeroShotAudioClassificationPipelineCallback Classify the sequence(s) given as inputs. * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. 
The input is either: * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate * to get the waveform using the [`AudioContext`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) API. * If `AudioContext` is not available, you should pass the raw waveform in as a Float32Array of shape `(n, )`. * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done). * @param {string[]} candidate_labels The candidate labels for this audio. * @param {ZeroShotAudioClassificationPipelineOptions} [options] The options to use for zero-shot audio classification. * @returns {Promise<ZeroShotAudioClassificationOutput[]|ZeroShotAudioClassificationOutput[][]>} An array of objects containing the predicted labels and scores. * * @typedef {TextAudioPipelineConstructorArgs & ZeroShotAudioClassificationPipelineCallback & Disposable} ZeroShotAudioClassificationPipelineType */ /** * Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you * provide an audio and a set of `candidate_labels`. * * **Example**: Perform zero-shot audio classification with `Xenova/clap-htsat-unfused`. * ```javascript * const classifier = await pipeline('zero-shot-audio-classification', 'Xenova/clap-htsat-unfused'); * const audio = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/dog_barking.wav'; * const candidate_labels = ['dog', 'vaccum cleaner']; * const scores = await classifier(audio, candidate_labels); * // [ * // { score: 0.9993992447853088, label: 'dog' }, * // { score: 0.0006007603369653225, label: 'vaccum cleaner' } * // ] * ``` */ export class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: TextAudioPipelineConstructorArgs) => ZeroShotAudioClassificationPipelineType} */ (Pipeline)) { /** * Create a new ZeroShotAudioClassificationPipeline. * @param {TextAudioPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ZeroShotAudioClassificationPipelineCallback} */ async _call(audio, candidate_labels, { hypothesis_template = "This is a sound of {}." } = {}) { const single = !Array.isArray(audio); if (single) { audio = [/** @type {AudioInput} */ (audio)]; } // Insert label into hypothesis template const texts = candidate_labels.map( x => hypothesis_template.replace('{}', x) ); // Run tokenization const text_inputs = this.tokenizer(texts, { padding: true, truncation: true, }); const sampling_rate = this.processor.feature_extractor.config.sampling_rate; const preparedAudios = await prepareAudios(audio, sampling_rate); const toReturn = []; for (const aud of preparedAudios) { const audio_inputs = await this.processor(aud); // Run model with both text and audio inputs const output = await this.model({ ...text_inputs, ...audio_inputs }); // Compute softmax per audio const probs = softmax(output.logits_per_audio.data); toReturn.push([...probs].map((x, i) => ({ score: x, label: candidate_labels[i] }))); } return single ? toReturn[0] : toReturn; } } /** * @typedef {Object} Chunk * @property {[number, number]} timestamp The start and end timestamp of the chunk in seconds. * @property {string} text The recognized text. */ /** * @typedef {Object} AutomaticSpeechRecognitionOutput * @property {string} text The recognized text. 
* @property {Chunk[]} [chunks] When using `return_timestamps`, the `chunks` will become a list * containing all the various text chunks identified by the model. * * @typedef {Object} AutomaticSpeechRecognitionSpecificParams Parameters specific to automatic-speech-recognition pipelines. * @property {boolean|'word'} [return_timestamps] Whether to return timestamps or not. Default is `false`. * @property {number} [chunk_length_s] The length of audio chunks to process in seconds. Default is 0 (no chunking). * @property {number} [stride_length_s] The length of overlap between consecutive audio chunks in seconds. If not provided, defaults to `chunk_length_s / 6`. * @property {boolean} [force_full_sequences] Whether to force outputting full sequences or not. Default is `false`. * @property {string} [language] The source language. Default is `null`, meaning it should be auto-detected. Use this to potentially improve performance if the source language is known. * @property {string} [task] The task to perform. Default is `null`, meaning it should be auto-detected. * @property {number} [num_frames] The number of frames in the input audio. * @typedef {import('./generation/configuration_utils.js').GenerationConfig & AutomaticSpeechRecognitionSpecificParams} AutomaticSpeechRecognitionConfig * * @callback AutomaticSpeechRecognitionPipelineCallback Transcribe the audio sequence(s) given as inputs to text. * @param {AudioPipelineInputs} audio The input audio file(s) to be transcribed. The input is either: * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate * to get the waveform using the [`AudioContext`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) API. * If `AudioContext` is not available, you should pass the raw waveform in as a Float32Array of shape `(n, )`. * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done). * @param {Partial<AutomaticSpeechRecognitionConfig>} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<AutomaticSpeechRecognitionOutput|AutomaticSpeechRecognitionOutput[]>} An object containing the transcription text and optionally timestamps if `return_timestamps` is `true`. * * @typedef {TextAudioPipelineConstructorArgs & AutomaticSpeechRecognitionPipelineCallback & Disposable} AutomaticSpeechRecognitionPipelineType */ /** * Pipeline that aims at extracting spoken text contained within some audio. * * **Example:** Transcribe English. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; * const output = await transcriber(url); * // { text: " And so my fellow Americans ask not what your country can do for you, ask what you can do for your country." } * ``` * * **Example:** Transcribe English w/ timestamps. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; * const output = await transcriber(url, { return_timestamps: true }); * // { * // text: " And so my fellow Americans ask not what your country can do for you, ask what you can do for your country." 
* // chunks: [ * // { timestamp: [0, 8], text: " And so my fellow Americans ask not what your country can do for you" } * // { timestamp: [8, 11], text: " ask what you can do for your country." } * // ] * // } * ``` * * **Example:** Transcribe English w/ word-level timestamps. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; * const output = await transcriber(url, { return_timestamps: 'word' }); * // { * // "text": " And so my fellow Americans ask not what your country can do for you ask what you can do for your country.", * // "chunks": [ * // { "text": " And", "timestamp": [0, 0.78] }, * // { "text": " so", "timestamp": [0.78, 1.06] }, * // { "text": " my", "timestamp": [1.06, 1.46] }, * // ... * // { "text": " for", "timestamp": [9.72, 9.92] }, * // { "text": " your", "timestamp": [9.92, 10.22] }, * // { "text": " country.", "timestamp": [10.22, 13.5] } * // ] * // } * ``` * * **Example:** Transcribe French. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/french-audio.mp3'; * const output = await transcriber(url, { language: 'french', task: 'transcribe' }); * // { text: " J'adore, j'aime, je n'aime pas, je déteste." } * ``` * * **Example:** Translate French to English. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/french-audio.mp3'; * const output = await transcriber(url, { language: 'french', task: 'translate' }); * // { text: " I love, I like, I don't like, I hate." } * ``` * * **Example:** Transcribe/translate audio longer than 30 seconds. * ```javascript * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/ted_60.wav'; * const output = await transcriber(url, { chunk_length_s: 30, stride_length_s: 5 }); * // { text: " So in college, I was a government major, which means [...] So I'd start off light and I'd bump it up" } * ``` */ export class AutomaticSpeechRecognitionPipeline extends (/** @type {new (options: TextAudioPipelineConstructorArgs) => AutomaticSpeechRecognitionPipelineType} */ (Pipeline)) { /** * Create a new AutomaticSpeechRecognitionPipeline. * @param {TextAudioPipelineConstructorArgs} options An object used to instantiate the pipeline. 
*/ constructor(options) { super(options); } /** @type {AutomaticSpeechRecognitionPipelineCallback} */ async _call(audio, kwargs = {}) { switch (this.model.config.model_type) { case 'whisper': case 'lite-whisper': return this._call_whisper(audio, kwargs) case 'wav2vec2': case 'wav2vec2-bert': case 'unispeech': case 'unispeech-sat': case 'hubert': return this._call_wav2vec2(audio, kwargs) case 'moonshine': return this._call_moonshine(audio, kwargs) default: throw new Error(`AutomaticSpeechRecognitionPipeline does not support model type '${this.model.config.model_type}'.`) } } /** * @type {AutomaticSpeechRecognitionPipelineCallback} * @private */ async _call_wav2vec2(audio, kwargs) { // TODO use kwargs if (kwargs.language) { console.warn('`language` parameter is not yet supported for `wav2vec2` models, defaulting to "English".'); } if (kwargs.task) { console.warn('`task` parameter is not yet supported for `wav2vec2` models, defaulting to "transcribe".'); } const single = !Array.isArray(audio); if (single) { audio = [/** @type {AudioInput} */ (audio)]; } const sampling_rate = this.processor.feature_extractor.config.sampling_rate; const preparedAudios = await prepareAudios(audio, sampling_rate); const toReturn = []; for (const aud of preparedAudios) { const inputs = await this.processor(aud); const output = await this.model(inputs); const logits = output.logits[0]; const predicted_ids = []; for (const item of logits) { predicted_ids.push(max(item.data)[1]) } const predicted_sentences = this.tokenizer.decode(predicted_ids) toReturn.push({ text: predicted_sentences }) } return single ? toReturn[0] : toReturn; } /** * @type {AutomaticSpeechRecognitionPipelineCallback} * @private */ async _call_whisper(audio, kwargs) { const return_timestamps = kwargs.return_timestamps ?? false; const chunk_length_s = kwargs.chunk_length_s ?? 0; const force_full_sequences = kwargs.force_full_sequences ?? false; let stride_length_s = kwargs.stride_length_s ?? 
null; const generation_config = { ...kwargs } if (return_timestamps === 'word') { generation_config['return_token_timestamps'] = true; generation_config['return_timestamps'] = false; // Do not predict timestamp tokens } const single = !Array.isArray(audio); if (single) { audio = [/** @type {AudioInput} */ (audio)]; } // @ts-expect-error TS2339 const time_precision = this.processor.feature_extractor.config.chunk_length / this.model.config.max_source_positions; const hop_length = this.processor.feature_extractor.config.hop_length; const sampling_rate = this.processor.feature_extractor.config.sampling_rate; const preparedAudios = await prepareAudios(audio, sampling_rate); const toReturn = []; for (const aud of preparedAudios) { /** @type {{stride: number[], input_features: Tensor, is_last: boolean, tokens?: bigint[], token_timestamps?: number[]}[]} */ let chunks = []; if (chunk_length_s > 0) { if (stride_length_s === null) { stride_length_s = chunk_length_s / 6; } else if (chunk_length_s <= stride_length_s) { throw Error("`chunk_length_s` must be larger than `stride_length_s`.") } // TODO support different stride_length_s (for left and right) const window = sampling_rate * chunk_length_s; const stride = sampling_rate * stride_length_s; const jump = window - 2 * stride; let offset = 0; // Create subarrays of audio with overlaps while (true) { const offset_end = offset + window; const subarr = aud.subarray(offset, offset_end); const feature = await this.processor(subarr); const is_first = offset === 0; const is_last = offset_end >= aud.length; chunks.push({ stride: [ subarr.length, is_first ? 0 : stride, is_last ? 0 : stride ], input_features: feature.input_features, is_last, }) if (is_last) break; offset += jump; } } else { chunks = [{ stride: [aud.length, 0, 0], input_features: (await this.processor(aud)).input_features, is_last: true }] } // Generate for each set of input features for (const chunk of chunks) { generation_config.num_frames = Math.floor(chunk.stride[0] / hop_length); // NOTE: doing sequentially for now const data = await this.model.generate({ inputs: chunk.input_features, ...generation_config }); // TODO: Right now we only get top beam if (return_timestamps === 'word') { // @ts-expect-error TS2339 chunk.tokens = data.sequences.tolist()[0]; // @ts-expect-error TS2339 chunk.token_timestamps = data.token_timestamps.tolist()[0].map( (/** @type {number} */ x) => round(x, 2) ); } else { chunk.tokens = (/** @type {Tensor} */(data))[0].tolist(); } // convert stride to seconds chunk.stride = chunk.stride.map(x => x / sampling_rate); } // Merge text chunks // @ts-ignore const [full_text, optional] = this.tokenizer._decode_asr(chunks, { time_precision, return_timestamps, force_full_sequences }); toReturn.push({ text: full_text, ...optional }) } return single ? toReturn[0] : toReturn; } /** * @type {AutomaticSpeechRecognitionPipelineCallback} * @private */ async _call_moonshine(audio, kwargs) { const single = !Array.isArray(audio); if (single) { audio = [/** @type {AudioInput} */ (audio)]; } const sampling_rate = this.processor.feature_extractor.config.sampling_rate; const preparedAudios = await prepareAudios(audio, sampling_rate); const toReturn = []; for (const aud of preparedAudios) { const inputs = await this.processor(aud); // According to the [paper](https://huggingface.co/papers/2410.15608): // "We use greedy decoding, with a heuristic limit of 6 output tokens // per second of audio to avoid repeated output sequences." 
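            // For example (illustrative values): a 10-second clip allows at most Math.floor(10) * 6 = 60 new tokens.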
            const max_new_tokens = Math.floor(aud.length / sampling_rate) * 6;
            const outputs = await this.model.generate({ max_new_tokens, ...kwargs, ...inputs });

            const text = this.processor.batch_decode(/** @type {Tensor} */(outputs), { skip_special_tokens: true })[0];
            toReturn.push({ text });
        }
        return single ? toReturn[0] : toReturn;
    }
}

/**
 * @typedef {Object} ImageToTextSingle
 * @property {string} generated_text The generated text.
 * @typedef {ImageToTextSingle[]} ImageToTextOutput
 *
 * @callback ImageToTextPipelineCallback Generate text from the image(s) passed as inputs.
 * @param {ImagePipelineInputs} texts The images to be captioned.
 * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
 * @returns {Promise<ImageToTextOutput|ImageToTextOutput[]>} An object (or array of objects) containing the generated text(s).
 *
 * @typedef {TextImagePipelineConstructorArgs & ImageToTextPipelineCallback & Disposable} ImageToTextPipelineType
 */

/**
 * Image To Text pipeline using an `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
 *
 * **Example:** Generate a caption for an image w/ `Xenova/vit-gpt2-image-captioning`.
 * ```javascript
 * const captioner = await pipeline('image-to-text', 'Xenova/vit-gpt2-image-captioning');
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg';
 * const output = await captioner(url);
 * // [{ generated_text: 'a cat laying on a couch with another cat' }]
 * ```
 *
 * **Example:** Optical Character Recognition (OCR) w/ `Xenova/trocr-small-handwritten`.
 * ```javascript
 * const captioner = await pipeline('image-to-text', 'Xenova/trocr-small-handwritten');
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/handwriting.jpg';
 * const output = await captioner(url);
 * // [{ generated_text: 'Mr. Brown commented icily.' }]
 * ```
 */
export class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineConstructorArgs) => ImageToTextPipelineType} */ (Pipeline)) {

    /**
     * Create a new ImageToTextPipeline.
     * @param {TextImagePipelineConstructorArgs} options An object used to instantiate the pipeline.
     */
    constructor(options) {
        super(options);
    }

    /** @type {ImageToTextPipelineCallback} */
    async _call(images, generate_kwargs = {}) {
        const isBatched = Array.isArray(images);
        const preparedImages = await prepareImages(images);

        const { pixel_values } = await this.processor(preparedImages);

        const toReturn = [];
        for (const batch of pixel_values) {
            batch.dims = [1, ...batch.dims]
            const output = await this.model.generate({ inputs: batch, ...generate_kwargs });
            const decoded = this.tokenizer.batch_decode(/** @type {Tensor} */(output), {
                skip_special_tokens: true,
            }).map(x => ({ generated_text: x.trim() }))
            toReturn.push(decoded);
        }

        return isBatched ? toReturn : toReturn[0];
    }
}

/**
 * @typedef {Object} ImageClassificationSingle
 * @property {string} label The label identified by the model.
 * @property {number} score The score attributed by the model for that label.
 * @typedef {ImageClassificationSingle[]} ImageClassificationOutput
 *
 * @typedef {Object} ImageClassificationPipelineOptions Parameters specific to image classification pipelines.
 * @property {number} [top_k=5] The number of top labels that will be returned by the pipeline.
 *
 * @callback ImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs.
* @param {ImagePipelineInputs} images The input images(s) to be classified. * @param {ImageClassificationPipelineOptions} [options] The options to use for image classification. * @returns {Promise<ImageClassificationOutput|ImageClassificationOutput[]>} An array or object containing the predicted labels and scores. * * @typedef {ImagePipelineConstructorArgs & ImageClassificationPipelineCallback & Disposable} ImageClassificationPipelineType */ /** * Image classification pipeline using any `AutoModelForImageClassification`. * This pipeline predicts the class of an image. * * **Example:** Classify an image. * ```javascript * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg'; * const output = await classifier(url); * // [ * // { label: 'tiger, Panthera tigris', score: 0.632695734500885 }, * // ] * ``` * * **Example:** Classify an image and return top `n` classes. * ```javascript * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg'; * const output = await classifier(url, { top_k: 3 }); * // [ * // { label: 'tiger, Panthera tigris', score: 0.632695734500885 }, * // { label: 'tiger cat', score: 0.3634825646877289 }, * // { label: 'lion, king of beasts, Panthera leo', score: 0.00045060308184474707 }, * // ] * ``` * * **Example:** Classify an image and return all classes. * ```javascript * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg'; * const output = await classifier(url, { top_k: 0 }); * // [ * // { label: 'tiger, Panthera tigris', score: 0.632695734500885 }, * // { label: 'tiger cat', score: 0.3634825646877289 }, * // { label: 'lion, king of beasts, Panthera leo', score: 0.00045060308184474707 }, * // { label: 'jaguar, panther, Panthera onca, Felis onca', score: 0.00035465499968267977 }, * // ... * // ] * ``` */ export class ImageClassificationPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageClassificationPipelineType} */ (Pipeline)) { /** * Create a new ImageClassificationPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ImageClassificationPipelineCallback} */ async _call(images, { top_k = 5 } = {}) { const preparedImages = await prepareImages(images); const { pixel_values } = await this.processor(preparedImages); const output = await this.model({ pixel_values }); // @ts-expect-error TS2339 const id2label = this.model.config.id2label; /** @type {ImageClassificationOutput[]} */ const toReturn = []; for (const batch of output.logits) { const scores = await topk(new Tensor( 'float32', softmax(batch.data), batch.dims, ), top_k); const values = scores[0].tolist(); const indices = scores[1].tolist(); const vals = indices.map((x, i) => ({ label: /** @type {string} */ (id2label ? id2label[x] : `LABEL_${x}`), score: /** @type {number} */ (values[i]), })); toReturn.push(vals); } return Array.isArray(images) ? toReturn : toReturn[0]; } } /** * @typedef {Object} ImageSegmentationPipelineOutput * @property {string|null} label The label of the segment. * @property {number|null} score The score of the segment. 
* @property {RawImage} mask The mask of the segment. * * @typedef {Object} ImageSegmentationPipelineOptions Parameters specific to image segmentation pipelines. * @property {number} [threshold=0.5] Probability threshold to filter out predicted masks. * @property {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values. * @property {number} [overlap_mask_area_threshold=0.8] Mask overlap threshold to eliminate small, disconnected segments. * @property {null|string} [subtask=null] Segmentation task to be performed. One of [`panoptic`, `instance`, and `semantic`], * depending on model capabilities. If not set, the pipeline will attempt to resolve (in that order). * @property {number[]} [label_ids_to_fuse=null] List of label ids to fuse. If not set, do not fuse any labels. * @property {number[][]} [target_sizes=null] List of target sizes for the input images. If not set, use the original image sizes. * * @callback ImageSegmentationPipelineCallback Segment the input images. * @param {ImagePipelineInputs} images The input images. * @param {ImageSegmentationPipelineOptions} [options] The options to use for image segmentation. * @returns {Promise<ImageSegmentationPipelineOutput[]>} The annotated segments. * * @typedef {ImagePipelineConstructorArgs & ImageSegmentationPipelineCallback & Disposable} ImageSegmentationPipelineType */ /** * Image segmentation pipeline using any `AutoModelForXXXSegmentation`. * This pipeline predicts masks of objects and their classes. * * **Example:** Perform image segmentation with `Xenova/detr-resnet-50-panoptic`. * ```javascript * const segmenter = await pipeline('image-segmentation', 'Xenova/detr-resnet-50-panoptic'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg'; * const output = await segmenter(url); * // [ * // { label: 'remote', score: 0.9984649419784546, mask: RawImage { ... } }, * // { label: 'cat', score: 0.9994316101074219, mask: RawImage { ... } } * // ] * ``` */ export class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageSegmentationPipelineType} */ (Pipeline)) { /** * Create a new ImageSegmentationPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); this.subtasks_mapping = { // Mapping of subtasks to their corresponding post-processing function names. 
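            // If no `subtask` is explicitly requested, `_call` below picks the first entry whose function exists on the image processor.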
panoptic: 'post_process_panoptic_segmentation', instance: 'post_process_instance_segmentation', semantic: 'post_process_semantic_segmentation' } } /** @type {ImageSegmentationPipelineCallback} */ async _call(images, { threshold = 0.5, mask_threshold = 0.5, overlap_mask_area_threshold = 0.8, label_ids_to_fuse = null, target_sizes = null, subtask = null, } = {}) { const isBatched = Array.isArray(images); if (isBatched && images.length !== 1) { throw Error("Image segmentation pipeline currently only supports a batch size of 1."); } const preparedImages = await prepareImages(images); const imageSizes = preparedImages.map(x => [x.height, x.width]); const inputs = await this.processor(preparedImages); const { inputNames, outputNames } = this.model.sessions['model']; if (!inputNames.includes('pixel_values')) { if (inputNames.length !== 1) { throw Error(`Expected a single input name, but got ${inputNames.length} inputs: ${inputNames}.`); } const newName = inputNames[0]; if (newName in inputs) { throw Error(`Input name ${newName} already exists in the inputs.`); } // To ensure compatibility with certain background-removal models, // we may need to perform a mapping of input to output names inputs[newName] = inputs.pixel_values; } const output = await this.model(inputs); let fn = null; if (subtask !== null) { fn = this.subtasks_mapping[subtask]; } else if (this.processor.image_processor) { for (const [task, func] of Object.entries(this.subtasks_mapping)) { if (func in this.processor.image_processor) { fn = this.processor.image_processor[func].bind(this.processor.image_processor); subtask = task; break; } } } // @ts-expect-error TS2339 const id2label = this.model.config.id2label; /** @type {ImageSegmentationPipelineOutput[]} */ const annotation = []; if (!subtask) { // We define an epsilon to safeguard against numerical/precision issues when detecting // the normalization mode of the output (i.e., sigmoid already applied, or not). // See https://github.com/microsoft/onnxruntime/issues/23943 for more information. const epsilon = 1e-5; // Perform standard image segmentation const result = output[outputNames[0]]; for (let i = 0; i < imageSizes.length; ++i) { const size = imageSizes[i]; const item = result[i]; if (item.data.some(x => x < -epsilon || x > 1 + epsilon)) { item.sigmoid_(); } const mask = await RawImage.fromTensor(item.mul_(255).to('uint8')).resize(size[1], size[0]); annotation.push({ label: null, score: null, mask }); } } else if (subtask === 'panoptic' || subtask === 'instance') { const processed = fn( output, threshold, mask_threshold, overlap_mask_area_threshold, label_ids_to_fuse, target_sizes ?? imageSizes, // TODO FIX? )[0]; const segmentation = processed.segmentation; for (const segment of processed.segments_info) { const maskData = new Uint8ClampedArray(segmentation.data.length); for (let i = 0; i < segmentation.data.length; ++i) { if (segmentation.data[i] === segment.id) { maskData[i] = 255; } } const mask = new RawImage(maskData, segmentation.dims[1], segmentation.dims[0], 1) annotation.push({ score: segment.score, label: id2label[segment.label_id], mask: mask }) } } else if (subtask === 'semantic') { const { segmentation, labels } = fn(output, target_sizes ?? 
imageSizes)[0]; for (const label of labels) { const maskData = new Uint8ClampedArray(segmentation.data.length); for (let i = 0; i < segmentation.data.length; ++i) { if (segmentation.data[i] === label) { maskData[i] = 255; } } const mask = new RawImage(maskData, segmentation.dims[1], segmentation.dims[0], 1); annotation.push({ score: null, label: id2label[label], mask: mask }); } } else { throw Error(`Subtask ${subtask} not supported.`); } return annotation; } } /** * @typedef {Object} BackgroundRemovalPipelineOptions Parameters specific to image segmentation pipelines. * * @callback BackgroundRemovalPipelineCallback Segment the input images. * @param {ImagePipelineInputs} images The input images. * @param {BackgroundRemovalPipelineOptions} [options] The options to use for image segmentation. * @returns {Promise<RawImage[]>} The images with the background removed. * * @typedef {ImagePipelineConstructorArgs & BackgroundRemovalPipelineCallback & Disposable} BackgroundRemovalPipelineType */ /** * Background removal pipeline using certain `AutoModelForXXXSegmentation`. * This pipeline removes the backgrounds of images. * * **Example:** Perform background removal with `Xenova/modnet`. * ```javascript * const segmenter = await pipeline('background-removal', 'Xenova/modnet'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/portrait-of-woman_small.jpg'; * const output = await segmenter(url); * // [ * // RawImage { data: Uint8ClampedArray(648000) [ ... ], width: 360, height: 450, channels: 4 } * // ] * ``` */ export class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => BackgroundRemovalPipelineType} */ (/** @type {any} */(ImageSegmentationPipeline))) { /** * Create a new BackgroundRemovalPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {BackgroundRemovalPipelineCallback} */ async _call(images, options = {}) { const isBatched = Array.isArray(images); if (isBatched && images.length !== 1) { throw Error("Background removal pipeline currently only supports a batch size of 1."); } const preparedImages = await prepareImages(images); // @ts-expect-error TS2339 const masks = await super._call(images, options); const result = preparedImages.map((img, i) => { const cloned = img.clone(); cloned.putAlpha(masks[i].mask); return cloned; }); return result; } } /** * @typedef {Object} ZeroShotImageClassificationOutput * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`. * @property {number} score The score attributed by the model for that label (between 0 and 1). * * @typedef {Object} ZeroShotImageClassificationPipelineOptions Parameters specific to zero-shot image classification pipelines. * @property {string} [hypothesis_template="This is a photo of {}"] The sentence used in conjunction with `candidate_labels` * to attempt the image classification by replacing the placeholder with the candidate_labels. * Then likelihood is estimated by using `logits_per_image`. * * @callback ZeroShotImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs. * @param {ImagePipelineInputs} images The input images. * @param {string[]} candidate_labels The candidate labels for this image. * @param {ZeroShotImageClassificationPipelineOptions} [options] The options to use for zero-shot image classification. 
* @returns {Promise<ZeroShotImageClassificationOutput[]|ZeroShotImageClassificationOutput[][]>} An array of objects containing the predicted labels and scores. * * @typedef {TextImagePipelineConstructorArgs & ZeroShotImageClassificationPipelineCallback & Disposable} ZeroShotImageClassificationPipelineType */ /** * Zero shot image classification pipeline. This pipeline predicts the class of * an image when you provide an image and a set of `candidate_labels`. * * **Example:** Zero shot image classification w/ `Xenova/clip-vit-base-patch32`. * ```javascript * const classifier = await pipeline('zero-shot-image-classification', 'Xenova/clip-vit-base-patch32'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg'; * const output = await classifier(url, ['tiger', 'horse', 'dog']); * // [ * // { score: 0.9993917942047119, label: 'tiger' }, * // { score: 0.0003519294841680676, label: 'horse' }, * // { score: 0.0002562698791734874, label: 'dog' } * // ] * ``` */ export class ZeroShotImageClassificationPipeline extends (/** @type {new (options: TextImagePipelineConstructorArgs) => ZeroShotImageClassificationPipelineType} */ (Pipeline)) { /** * Create a new ZeroShotImageClassificationPipeline. * @param {TextImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ZeroShotImageClassificationPipelineCallback} */ async _call(images, candidate_labels, { hypothesis_template = "This is a photo of {}" } = {}) { const isBatched = Array.isArray(images); const preparedImages = await prepareImages(images); // Insert label into hypothesis template const texts = candidate_labels.map( x => hypothesis_template.replace('{}', x) ); // Run tokenization const text_inputs = this.tokenizer(texts, { padding: this.model.config.model_type === 'siglip' ? 'max_length' : true, truncation: true, }); // Run processor const { pixel_values } = await this.processor(preparedImages); // Run model with both text and pixel inputs const output = await this.model({ ...text_inputs, pixel_values }); const function_to_apply = this.model.config.model_type === 'siglip' ? batch => batch.sigmoid().data : batch => softmax(batch.data); // Compare each image with each candidate label const toReturn = []; for (const batch of output.logits_per_image) { // Compute softmax per image const probs = function_to_apply(batch); const result = [...probs].map((x, i) => ({ score: x, label: candidate_labels[i] })); result.sort((a, b) => b.score - a.score); // sort by score in descending order toReturn.push(result); } return isBatched ? toReturn : toReturn[0]; } } /** * @typedef {Object} ObjectDetectionPipelineSingle * @property {string} label The class label identified by the model. * @property {number} score The score attributed by the model for that label. * @property {BoundingBox} box The bounding box of detected object in image's original size, or as a percentage if `percentage` is set to true. * @typedef {ObjectDetectionPipelineSingle[]} ObjectDetectionPipelineOutput * * @typedef {Object} ObjectDetectionPipelineOptions Parameters specific to object detection pipelines. * @property {number} [threshold=0.9] The threshold used to filter boxes by score. * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false). * * @callback ObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs. 
* @param {ImagePipelineInputs} images The input images. * @param {ObjectDetectionPipelineOptions} [options] The options to use for object detection. * @returns {Promise<ObjectDetectionPipelineOutput|ObjectDetectionPipelineOutput[]>} A list of objects or a list of list of objects. * * @typedef {ImagePipelineConstructorArgs & ObjectDetectionPipelineCallback & Disposable} ObjectDetectionPipelineType */ /** * Object detection pipeline using any `AutoModelForObjectDetection`. * This pipeline predicts bounding boxes of objects and their classes. * * **Example:** Run object-detection with `Xenova/detr-resnet-50`. * ```javascript * const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50'); * const img = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg'; * const output = await detector(img, { threshold: 0.9 }); * // [{ * // score: 0.9976370930671692, * // label: "remote", * // box: { xmin: 31, ymin: 68, xmax: 190, ymax: 118 } * // }, * // ... * // { * // score: 0.9984092116355896, * // label: "cat", * // box: { xmin: 331, ymin: 19, xmax: 649, ymax: 371 } * // }] * ``` */ export class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ObjectDetectionPipelineType} */ (Pipeline)) { /** * Create a new ObjectDetectionPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ObjectDetectionPipelineCallback} */ async _call(images, { threshold = 0.9, percentage = false, } = {}) { const isBatched = Array.isArray(images); if (isBatched && images.length !== 1) { throw Error("Object detection pipeline currently only supports a batch size of 1."); } const preparedImages = await prepareImages(images); const imageSizes = percentage ? null : preparedImages.map(x => [x.height, x.width]); const { pixel_values, pixel_mask } = await this.processor(preparedImages); const output = await this.model({ pixel_values, pixel_mask }); // @ts-ignore const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSizes); // Add labels // @ts-expect-error TS2339 const id2label = this.model.config.id2label; // Format output /** @type {ObjectDetectionPipelineOutput[]} */ const result = processed.map(batch => ( batch.boxes.map((box, i) => ({ score: batch.scores[i], label: id2label[batch.classes[i]], box: get_bounding_box(box, !percentage), })) )) return isBatched ? result : result[0]; } } /** * @typedef {Object} ZeroShotObjectDetectionOutput * @property {string} label Text query corresponding to the found object. * @property {number} score Score corresponding to the object (between 0 and 1). * @property {BoundingBox} box Bounding box of the detected object in image's original size, or as a percentage if `percentage` is set to true. * * @typedef {Object} ZeroShotObjectDetectionPipelineOptions Parameters specific to zero-shot object detection pipelines. * @property {number} [threshold=0.1] The probability necessary to make a prediction. * @property {number} [top_k=null] The number of top predictions that will be returned by the pipeline. * If the provided number is `null` or higher than the number of predictions available, it will default * to the number of predictions. * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false). 
* * @callback ZeroShotObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs. * @param {ImagePipelineInputs} images The input images. * @param {string[]} candidate_labels What the model should recognize in the image. * @param {ZeroShotObjectDetectionPipelineOptions} [options] The options to use for zero-shot object detection. * @returns {Promise<ZeroShotObjectDetectionOutput[]|ZeroShotObjectDetectionOutput[][]>} An array of objects containing the predicted labels, scores, and bounding boxes. * * @typedef {TextImagePipelineConstructorArgs & ZeroShotObjectDetectionPipelineCallback & Disposable} ZeroShotObjectDetectionPipelineType */ /** * Zero-shot object detection pipeline. This pipeline predicts bounding boxes of * objects when you provide an image and a set of `candidate_labels`. * * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32`. * ```javascript * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/astronaut.png'; * const candidate_labels = ['human face', 'rocket', 'helmet', 'american flag']; * const output = await detector(url, candidate_labels); * // [ * // { * // score: 0.24392342567443848, * // label: 'human face', * // box: { xmin: 180, ymin: 67, xmax: 274, ymax: 175 } * // }, * // { * // score: 0.15129457414150238, * // label: 'american flag', * // box: { xmin: 0, ymin: 4, xmax: 106, ymax: 513 } * // }, * // { * // score: 0.13649864494800568, * // label: 'helmet', * // box: { xmin: 277, ymin: 337, xmax: 511, ymax: 511 } * // }, * // { * // score: 0.10262022167444229, * // label: 'rocket', * // box: { xmin: 352, ymin: -1, xmax: 463, ymax: 287 } * // } * // ] * ``` * * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32` (returning top 4 matches and setting a threshold). * ```javascript * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/beach.png'; * const candidate_labels = ['hat', 'book', 'sunglasses', 'camera']; * const output = await detector(url, candidate_labels, { top_k: 4, threshold: 0.05 }); * // [ * // { * // score: 0.1606510728597641, * // label: 'sunglasses', * // box: { xmin: 347, ymin: 229, xmax: 429, ymax: 264 } * // }, * // { * // score: 0.08935828506946564, * // label: 'hat', * // box: { xmin: 38, ymin: 174, xmax: 258, ymax: 364 } * // }, * // { * // score: 0.08530698716640472, * // label: 'camera', * // box: { xmin: 187, ymin: 350, xmax: 260, ymax: 411 } * // }, * // { * // score: 0.08349756896495819, * // label: 'book', * // box: { xmin: 261, ymin: 280, xmax: 494, ymax: 425 } * // } * // ] * ``` */ export class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: TextImagePipelineConstructorArgs) => ZeroShotObjectDetectionPipelineType} */ (Pipeline)) { /** * Create a new ZeroShotObjectDetectionPipeline. * @param {TextImagePipelineConstructorArgs} options An object used to instantiate the pipeline. 
*/ constructor(options) { super(options); } /** @type {ZeroShotObjectDetectionPipelineCallback} */ async _call(images, candidate_labels, { threshold = 0.1, top_k = null, percentage = false, } = {}) { const isBatched = Array.isArray(images); const preparedImages = await prepareImages(images); // Run tokenization const text_inputs = this.tokenizer(candidate_labels, { padding: true, truncation: true, }); // Run processor const model_inputs = await this.processor(preparedImages); // Since non-maximum suppression is performed for exporting, we need to // process each image separately. For more information, see: // https://github.com/huggingface/optimum/blob/e3b7efb1257c011db907ef40ab340e795cc5684c/optimum/exporters/onnx/model_configs.py#L1028-L1032 const toReturn = []; for (let i = 0; i < preparedImages.length; ++i) { const image = preparedImages[i]; const imageSize = percentage ? null : [[image.height, image.width]]; const pixel_values = model_inputs.pixel_values[i].unsqueeze_(0); // Run model with both text and pixel inputs const output = await this.model({ ...text_inputs, pixel_values }); let result; if ('post_process_grounded_object_detection' in this.processor) { // @ts-ignore const processed = this.processor.post_process_grounded_object_detection( output, text_inputs.input_ids, { // TODO: support separate threshold values box_threshold: threshold, text_threshold: threshold, target_sizes: imageSize, }, )[0]; result = processed.boxes.map((box, i) => ({ score: processed.scores[i], label: processed.labels[i], box: get_bounding_box(box, !percentage), })) } else { // @ts-ignore const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSize, true)[0]; result = processed.boxes.map((box, i) => ({ score: processed.scores[i], label: candidate_labels[processed.classes[i]], box: get_bounding_box(box, !percentage), })) } result.sort((a, b) => b.score - a.score); if (top_k !== null) { result = result.slice(0, top_k); } toReturn.push(result) } return isBatched ? toReturn : toReturn[0]; } } /** * @typedef {Object} DocumentQuestionAnsweringSingle * @property {string} answer The generated text. * @typedef {DocumentQuestionAnsweringSingle[]} DocumentQuestionAnsweringOutput * * @callback DocumentQuestionAnsweringPipelineCallback Answer the question given as input by using the document. * @param {ImageInput} image The image of the document to use. * @param {string} question A question to ask of the document. * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model. * @returns {Promise<DocumentQuestionAnsweringOutput|DocumentQuestionAnsweringOutput[]>} An object (or array of objects) containing the answer(s). * * @typedef {TextImagePipelineConstructorArgs & DocumentQuestionAnsweringPipelineCallback & Disposable} DocumentQuestionAnsweringPipelineType */ /** * Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. * The inputs/outputs are similar to the (extractive) question answering pipeline; however, * the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context. * * **Example:** Answer questions about a document with `Xenova/donut-base-finetuned-docvqa`. 
* ```javascript * const qa_pipeline = await pipeline('document-question-answering', 'Xenova/donut-base-finetuned-docvqa'); * const image = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice.png'; * const question = 'What is the invoice number?'; * const output = await qa_pipeline(image, question); * // [{ answer: 'us-001' }] * ``` */ export class DocumentQuestionAnsweringPipeline extends (/** @type {new (options: TextImagePipelineConstructorArgs) => DocumentQuestionAnsweringPipelineType} */ (Pipeline)) { /** * Create a new DocumentQuestionAnsweringPipeline. * @param {TextImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {DocumentQuestionAnsweringPipelineCallback} */ async _call(image, question, generate_kwargs = {}) { // NOTE: For now, we only support a batch size of 1 // Preprocess image const preparedImage = (await prepareImages(image))[0]; const { pixel_values } = await this.processor(preparedImage); // Run tokenization const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`; const decoder_input_ids = this.tokenizer(task_prompt, { add_special_tokens: false, padding: true, truncation: true, }).input_ids; // Run model const output = await this.model.generate({ inputs: pixel_values, // @ts-expect-error TS2339 max_length: this.model.config.decoder.max_position_embeddings, decoder_input_ids, ...generate_kwargs, }); // Decode output const decoded = this.tokenizer.batch_decode(/** @type {Tensor} */(output))[0]; // Parse answer const match = decoded.match(/<s_answer>(.*?)<\/s_answer>/); let answer = null; if (match && match.length >= 2) { answer = match[1].trim(); } return [{ answer }]; } } /** * @typedef {Object} VocoderOptions * @property {PreTrainedModel} [vocoder] The vocoder used by the pipeline (if the model uses one). If not provided, use the default HifiGan vocoder. * @typedef {TextAudioPipelineConstructorArgs & VocoderOptions} TextToAudioPipelineConstructorArgs */ /** * @typedef {Object} TextToAudioOutput * @property {Float32Array} audio The generated audio waveform. * @property {number} sampling_rate The sampling rate of the generated audio waveform. * * @typedef {Object} TextToAudioPipelineOptions Parameters specific to text-to-audio pipelines. * @property {Tensor|Float32Array|string|URL} [speaker_embeddings=null] The speaker embeddings (if the model requires it). * * @callback TextToAudioPipelineCallback Generates speech/audio from the inputs. * @param {string|string[]} texts The text(s) to generate. * @param {TextToAudioPipelineOptions} options Parameters passed to the model generation/forward method. * @returns {Promise<TextToAudioOutput>} An object containing the generated audio and sampling rate. * * @typedef {TextToAudioPipelineConstructorArgs & TextToAudioPipelineCallback & Disposable} TextToAudioPipelineType */ /** * Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. * This pipeline generates an audio file from an input text and optional other conditional inputs. * * **Example:** Generate audio from text with `Xenova/speecht5_tts`. 
* ```javascript * const synthesizer = await pipeline('text-to-speech', 'Xenova/speecht5_tts', { quantized: false }); * const speaker_embeddings = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin'; * const out = await synthesizer('Hello, my dog is cute', { speaker_embeddings }); * // RawAudio { * // audio: Float32Array(26112) [-0.00005657337896991521, 0.00020583874720614403, ...], * // sampling_rate: 16000 * // } * ``` * * You can then save the audio to a .wav file with the `wavefile` package: * ```javascript * import wavefile from 'wavefile'; * import fs from 'fs'; * * const wav = new wavefile.WaveFile(); * wav.fromScratch(1, out.sampling_rate, '32f', out.audio); * fs.writeFileSync('out.wav', wav.toBuffer()); * ``` * * **Example:** Multilingual speech generation with `Xenova/mms-tts-fra`. See [here](https://huggingface.co/models?pipeline_tag=text-to-speech&other=vits&sort=trending) for the full list of available languages (1107). * ```javascript * const synthesizer = await pipeline('text-to-speech', 'Xenova/mms-tts-fra'); * const out = await synthesizer('Bonjour'); * // RawAudio { * // audio: Float32Array(23808) [-0.00037693005288019776, 0.0003325853613205254, ...], * // sampling_rate: 16000 * // } * ``` */ export class TextToAudioPipeline extends (/** @type {new (options: TextToAudioPipelineConstructorArgs) => TextToAudioPipelineType} */ (Pipeline)) { DEFAULT_VOCODER_ID = "Xenova/speecht5_hifigan" /** * Create a new TextToAudioPipeline. * @param {TextToAudioPipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); // TODO: Find a better way for `pipeline` to set the default vocoder this.vocoder = options.vocoder ?? null; } /** @type {TextToAudioPipelineCallback} */ async _call(text_inputs, { speaker_embeddings = null, } = {}) { // If this.processor is not set, we are using a `AutoModelForTextToWaveform` model if (this.processor) { return this._call_text_to_spectrogram(text_inputs, { speaker_embeddings }); } else { return this._call_text_to_waveform(text_inputs); } } async _call_text_to_waveform(text_inputs) { // Run tokenization const inputs = this.tokenizer(text_inputs, { padding: true, truncation: true, }); // Generate waveform const { waveform } = await this.model(inputs); // @ts-expect-error TS2339 const sampling_rate = this.model.config.sampling_rate; return new RawAudio( waveform.data, sampling_rate, ) } async _call_text_to_spectrogram(text_inputs, { speaker_embeddings }) { // Load vocoder, if not provided if (!this.vocoder) { console.log('No vocoder specified, using default HifiGan vocoder.'); this.vocoder = await AutoModel.from_pretrained(this.DEFAULT_VOCODER_ID, { dtype: 'fp32' }); } // Load speaker embeddings as Float32Array from path/URL if (typeof speaker_embeddings === 'string' || speaker_embeddings instanceof URL) { // Load from URL with fetch speaker_embeddings = new Float32Array( await (await fetch(speaker_embeddings)).arrayBuffer() ); } if (speaker_embeddings instanceof Float32Array) { speaker_embeddings = new Tensor( 'float32', speaker_embeddings, [1, speaker_embeddings.length] ) } else if (!(speaker_embeddings instanceof Tensor)) { throw new Error("Speaker embeddings must be a `Tensor`, `Float32Array`, `string`, or `URL`.") } // Run tokenization const { input_ids } = this.tokenizer(text_inputs, { padding: true, truncation: true, }); // NOTE: At this point, we are guaranteed that `speaker_embeddings` is a `Tensor` // @ts-ignore const { waveform } = await 
this.model.generate_speech(input_ids, speaker_embeddings, { vocoder: this.vocoder }); const sampling_rate = this.processor.feature_extractor.config.sampling_rate; return new RawAudio( waveform.data, sampling_rate, ) } } /** * @callback ImageToImagePipelineCallback Transform the image(s) passed as inputs. * @param {ImagePipelineInputs} images The images to transform. * @returns {Promise<RawImage|RawImage[]>} The transformed image or list of images. * * @typedef {ImagePipelineConstructorArgs & ImageToImagePipelineCallback & Disposable} ImageToImagePipelineType */ /** * Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous image input. * * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64` * ```javascript * const upscaler = await pipeline('image-to-image', 'Xenova/swin2SR-classical-sr-x2-64'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/butterfly.jpg'; * const output = await upscaler(url); * // RawImage { * // data: Uint8Array(786432) [ 41, 31, 24, 43, ... ], * // width: 512, * // height: 512, * // channels: 3 * // } * ``` */ export class ImageToImagePipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageToImagePipelineType} */ (Pipeline)) { /** * Create a new ImageToImagePipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {ImageToImagePipelineCallback} */ async _call(images) { const preparedImages = await prepareImages(images); const inputs = await this.processor(preparedImages); const outputs = await this.model(inputs); /** @type {RawImage[]} */ const toReturn = []; for (const batch of outputs.reconstruction) { const output = batch.squeeze().clamp_(0, 1).mul_(255).round_().to('uint8'); toReturn.push(RawImage.fromTensor(output)); } return toReturn.length > 1 ? toReturn : toReturn[0]; } } /** * @typedef {Object} DepthEstimationPipelineOutput * @property {Tensor} predicted_depth The raw depth map predicted by the model. * @property {RawImage} depth The processed depth map as an image (with the same size as the input image). * * @callback DepthEstimationPipelineCallback Predicts the depth for the image(s) passed as inputs. * @param {ImagePipelineInputs} images The images to compute depth for. * @returns {Promise<DepthEstimationPipelineOutput|DepthEstimationPipelineOutput[]>} An image or a list of images containing result(s). * * @typedef {ImagePipelineConstructorArgs & DepthEstimationPipelineCallback & Disposable} DepthEstimationPipelineType */ /** * Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image. * * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas` * ```javascript * const depth_estimator = await pipeline('depth-estimation', 'Xenova/dpt-hybrid-midas'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg'; * const out = await depth_estimator(url); * // { * // predicted_depth: Tensor { * // dims: [ 384, 384 ], * // type: 'float32', * // data: Float32Array(147456) [ 542.859130859375, 545.2833862304688, 546.1649169921875, ... ], * // size: 147456 * // }, * // depth: RawImage { * // data: Uint8Array(307200) [ 86, 86, 86, ... 
], * // width: 640, * // height: 480, * // channels: 1 * // } * // } * ``` */ export class DepthEstimationPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => DepthEstimationPipelineType} */ (Pipeline)) { /** * Create a new DepthEstimationPipeline. * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline. */ constructor(options) { super(options); } /** @type {DepthEstimationPipelineCallback} */ async _call(images) { const preparedImages = await prepareImages(images); const inputs = await this.processor(preparedImages); const { predicted_depth } = await this.model(inputs); const toReturn = []; for (let i = 0; i < preparedImages.length; ++i) { const batch = predicted_depth[i]; const [height, width] = batch.dims.slice(-2); const [new_width, new_height] = preparedImages[i].size; // Interpolate to original size const prediction = (await interpolate_4d(batch.view(1, 1, height, width), { size: [new_height, new_width], mode: 'bilinear', })).view(new_height, new_width); const minval = /** @type {number} */(prediction.min().item()); const maxval = /** @type {number} */(prediction.max().item()); const formatted = prediction.sub(minval).div_(maxval - minval).mul_(255).to('uint8').unsqueeze(0); const depth = RawImage.fromTensor(formatted); toReturn.push({ predicted_depth: prediction, depth, }); } return toReturn.length > 1 ? toReturn : toReturn[0]; } } const SUPPORTED_TASKS = Object.freeze({ "text-classification": { "tokenizer": AutoTokenizer, "pipeline": TextClassificationPipeline, "model": AutoModelForSequenceClassification, "default": { // TODO: replace with original // "model": "distilbert-base-uncased-finetuned-sst-2-english", "model": "Xenova/distilbert-base-uncased-finetuned-sst-2-english", }, "type": "text", }, "token-classification": { "tokenizer": AutoTokenizer, "pipeline": TokenClassificationPipeline, "model": AutoModelForTokenClassification, "default": { // TODO: replace with original // "model": "Davlan/bert-base-multilingual-cased-ner-hrl", "model": "Xenova/bert-base-multilingual-cased-ner-hrl", }, "type": "text", }, "question-answering": { "tokenizer": AutoTokenizer, "pipeline": QuestionAnsweringPipeline, "model": AutoModelForQuestionAnswering, "default": { // TODO: replace with original // "model": "distilbert-base-cased-distilled-squad", "model": "Xenova/distilbert-base-cased-distilled-squad", }, "type": "text", }, "fill-mask": { "tokenizer": AutoTokenizer, "pipeline": FillMaskPipeline, "model": AutoModelForMaskedLM, "default": { // TODO: replace with original // "model": "bert-base-uncased", "model": "Xenova/bert-base-uncased", }, "type": "text", }, "summarization": { "tokenizer": AutoTokenizer, "pipeline": SummarizationPipeline, "model": AutoModelForSeq2SeqLM, "default": { // TODO: replace with original // "model": "sshleifer/distilbart-cnn-6-6", "model": "Xenova/distilbart-cnn-6-6", }, "type": "text", }, "translation": { "tokenizer": AutoTokenizer, "pipeline": TranslationPipeline, "model": AutoModelForSeq2SeqLM, "default": { // TODO: replace with original // "model": "t5-small", "model": "Xenova/t5-small", }, "type": "text", }, "text2text-generation": { "tokenizer": AutoTokenizer, "pipeline": Text2TextGenerationPipeline, "model": AutoModelForSeq2SeqLM, "default": { // TODO: replace with original // "model": "google/flan-t5-small", "model": "Xenova/flan-t5-small", }, "type": "text", }, "text-generation": { "tokenizer": AutoTokenizer, "pipeline": TextGenerationPipeline, "model": AutoModelForCausalLM, "default": { // TODO: 
replace with original // "model": "gpt2", "model": "Xenova/gpt2", }, "type": "text", }, "zero-shot-classification": { "tokenizer": AutoTokenizer, "pipeline": ZeroShotClassificationPipeline, "model": AutoModelForSequenceClassification, "default": { // TODO: replace with original // "model": "typeform/distilbert-base-uncased-mnli", "model": "Xenova/distilbert-base-uncased-mnli", }, "type": "text", }, "audio-classification": { "pipeline": AudioClassificationPipeline, "model": AutoModelForAudioClassification, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "superb/wav2vec2-base-superb-ks", "model": "Xenova/wav2vec2-base-superb-ks", }, "type": "audio", }, "zero-shot-audio-classification": { "tokenizer": AutoTokenizer, "pipeline": ZeroShotAudioClassificationPipeline, "model": AutoModel, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "laion/clap-htsat-fused", "model": "Xenova/clap-htsat-unfused", }, "type": "multimodal", }, "automatic-speech-recognition": { "tokenizer": AutoTokenizer, "pipeline": AutomaticSpeechRecognitionPipeline, "model": [AutoModelForSpeechSeq2Seq, AutoModelForCTC], "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "openai/whisper-tiny.en", "model": "Xenova/whisper-tiny.en", }, "type": "multimodal", }, "text-to-audio": { "tokenizer": AutoTokenizer, "pipeline": TextToAudioPipeline, "model": [AutoModelForTextToWaveform, AutoModelForTextToSpectrogram], "processor": [AutoProcessor, /* Some don't use a processor */ null], "default": { // TODO: replace with original // "model": "microsoft/speecht5_tts", "model": "Xenova/speecht5_tts", }, "type": "text", }, "image-to-text": { "tokenizer": AutoTokenizer, "pipeline": ImageToTextPipeline, "model": AutoModelForVision2Seq, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "nlpconnect/vit-gpt2-image-captioning", "model": "Xenova/vit-gpt2-image-captioning", }, "type": "multimodal", }, "image-classification": { // no tokenizer "pipeline": ImageClassificationPipeline, "model": AutoModelForImageClassification, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "google/vit-base-patch16-224", "model": "Xenova/vit-base-patch16-224", }, "type": "multimodal", }, "image-segmentation": { // no tokenizer "pipeline": ImageSegmentationPipeline, "model": [AutoModelForImageSegmentation, AutoModelForSemanticSegmentation, AutoModelForUniversalSegmentation], "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "facebook/detr-resnet-50-panoptic", "model": "Xenova/detr-resnet-50-panoptic", }, "type": "multimodal", }, "background-removal": { // no tokenizer "pipeline": BackgroundRemovalPipeline, "model": [AutoModelForImageSegmentation, AutoModelForSemanticSegmentation, AutoModelForUniversalSegmentation], "processor": AutoProcessor, "default": { "model": "Xenova/modnet", }, "type": "image", }, "zero-shot-image-classification": { "tokenizer": AutoTokenizer, "pipeline": ZeroShotImageClassificationPipeline, "model": AutoModel, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "openai/clip-vit-base-patch32", "model": "Xenova/clip-vit-base-patch32", }, "type": "multimodal", }, "object-detection": { // no tokenizer "pipeline": ObjectDetectionPipeline, "model": AutoModelForObjectDetection, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "facebook/detr-resnet-50", "model": "Xenova/detr-resnet-50", }, 
"type": "multimodal", }, "zero-shot-object-detection": { "tokenizer": AutoTokenizer, "pipeline": ZeroShotObjectDetectionPipeline, "model": AutoModelForZeroShotObjectDetection, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "google/owlvit-base-patch32", "model": "Xenova/owlvit-base-patch32", }, "type": "multimodal", }, "document-question-answering": { "tokenizer": AutoTokenizer, "pipeline": DocumentQuestionAnsweringPipeline, "model": AutoModelForDocumentQuestionAnswering, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "naver-clova-ix/donut-base-finetuned-docvqa", "model": "Xenova/donut-base-finetuned-docvqa", }, "type": "multimodal", }, "image-to-image": { // no tokenizer "pipeline": ImageToImagePipeline, "model": AutoModelForImageToImage, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "caidas/swin2SR-classical-sr-x2-64", "model": "Xenova/swin2SR-classical-sr-x2-64", }, "type": "image", }, "depth-estimation": { // no tokenizer "pipeline": DepthEstimationPipeline, "model": AutoModelForDepthEstimation, "processor": AutoProcessor, "default": { // TODO: replace with original // "model": "Intel/dpt-large", "model": "Xenova/dpt-large", }, "type": "image", }, // This task serves as a useful interface for dealing with sentence-transformers (https://huggingface.co/sentence-transformers). "feature-extraction": { "tokenizer": AutoTokenizer, "pipeline": FeatureExtractionPipeline, "model": AutoModel, "default": { // TODO: replace with original // "model": "sentence-transformers/all-MiniLM-L6-v2", "model": "Xenova/all-MiniLM-L6-v2", }, "type": "text", }, "image-feature-extraction": { "processor": AutoProcessor, "pipeline": ImageFeatureExtractionPipeline, "model": [AutoModelForImageFeatureExtraction, AutoModel], "default": { // TODO: replace with original // "model": "google/vit-base-patch16-224", "model": "Xenova/vit-base-patch16-224-in21k", }, "type": "image", }, }) // TODO: Add types for TASK_ALIASES const TASK_ALIASES = Object.freeze({ "sentiment-analysis": "text-classification", "ner": "token-classification", // "vqa": "visual-question-answering", // TODO: Add "asr": "automatic-speech-recognition", "text-to-speech": "text-to-audio", // Add for backwards compatibility "embeddings": "feature-extraction", }); /** * @typedef {keyof typeof SUPPORTED_TASKS} TaskType * @typedef {keyof typeof TASK_ALIASES} AliasType * @typedef {TaskType | AliasType} PipelineType All possible pipeline types. * @typedef {{[K in TaskType]: InstanceType<typeof SUPPORTED_TASKS[K]["pipeline"]>}} SupportedTasks A mapping of pipeline names to their corresponding pipeline classes. * @typedef {{[K in AliasType]: InstanceType<typeof SUPPORTED_TASKS[TASK_ALIASES[K]]["pipeline"]>}} AliasTasks A mapping from pipeline aliases to their corresponding pipeline classes. * @typedef {SupportedTasks & AliasTasks} AllTasks A mapping from all pipeline names and aliases to their corresponding pipeline classes. */ /** * Utility factory method to build a `Pipeline` object. * * @template {PipelineType} T The type of pipeline to return. * @param {T} task The task defining which pipeline will be returned. Currently accepted tasks are: * - `"audio-classification"`: will return a `AudioClassificationPipeline`. * - `"automatic-speech-recognition"`: will return a `AutomaticSpeechRecognitionPipeline`. * - `"depth-estimation"`: will return a `DepthEstimationPipeline`. 
* - `"document-question-answering"`: will return a `DocumentQuestionAnsweringPipeline`. * - `"feature-extraction"`: will return a `FeatureExtractionPipeline`. * - `"fill-mask"`: will return a `FillMaskPipeline`. * - `"image-classification"`: will return a `ImageClassificationPipeline`. * - `"image-segmentation"`: will return a `ImageSegmentationPipeline`. * - `"image-to-text"`: will return a `ImageToTextPipeline`. * - `"object-detection"`: will return a `ObjectDetectionPipeline`. * - `"question-answering"`: will return a `QuestionAnsweringPipeline`. * - `"summarization"`: will return a `SummarizationPipeline`. * - `"text2text-generation"`: will return a `Text2TextGenerationPipeline`. * - `"text-classification"` (alias "sentiment-analysis" available): will return a `TextClassificationPipeline`. * - `"text-generation"`: will return a `TextGenerationPipeline`. * - `"token-classification"` (alias "ner" available): will return a `TokenClassificationPipeline`. * - `"translation"`: will return a `TranslationPipeline`. * - `"translation_xx_to_yy"`: will return a `TranslationPipeline`. * - `"zero-shot-classification"`: will return a `ZeroShotClassificationPipeline`. * - `"zero-shot-audio-classification"`: will return a `ZeroShotAudioClassificationPipeline`. * - `"zero-shot-image-classification"`: will return a `ZeroShotImageClassificationPipeline`. * - `"zero-shot-object-detection"`: will return a `ZeroShotObjectDetectionPipeline`. * @param {string} [model=null] The name of the pre-trained model to use. If not specified, the default model for the task will be used. * @param {import('./utils/hub.js').PretrainedModelOptions} [options] Optional parameters for the pipeline. * @returns {Promise<AllTasks[T]>} A Pipeline object for the specified task. * @throws {Error} If an unsupported pipeline is requested. */ export async function pipeline( task, model = null, { progress_callback = null, config = null, cache_dir = null, local_files_only = false, revision = 'main', device = null, dtype = null, subfolder = 'onnx', use_external_data_format = null, model_file_name = null, session_options = {}, } = {} ) { // Helper method to construct pipeline // Apply aliases // @ts-ignore task = TASK_ALIASES[task] ?? task; // Get pipeline info const pipelineInfo = SUPPORTED_TASKS[task.split('_', 1)[0]]; if (!pipelineInfo) { throw Error(`Unsupported pipeline: ${task}. Must be one of [${Object.keys(SUPPORTED_TASKS)}]`) } // Use model if specified, otherwise, use default if (!model) { model = pipelineInfo.default.model console.log(`No model specified. Using default model: "${model}".`); } const pretrainedOptions = { progress_callback, config, cache_dir, local_files_only, revision, device, dtype, subfolder, use_external_data_format, model_file_name, session_options, } const classes = new Map([ ['tokenizer', pipelineInfo.tokenizer], ['model', pipelineInfo.model], ['processor', pipelineInfo.processor], ]); // Load model, tokenizer, and processor (if they exist) const results = await loadItems(classes, model, pretrainedOptions); results.task = task; dispatchCallback(progress_callback, { 'status': 'ready', 'task': task, 'model': model, }); const pipelineClass = pipelineInfo.pipeline; return new pipelineClass(results); } /** * Helper function to get applicable model, tokenizer, or processor classes for a given model. * @param {Map<string, any>} mapping The mapping of names to classes, arrays of classes, or null. * @param {string} model The name of the model to load. 
* @param {import('./utils/hub.js').PretrainedOptions} pretrainedOptions The options to pass to the `from_pretrained` method. * @private */ async function loadItems(mapping, model, pretrainedOptions) { const result = Object.create(null); /**@type {Promise[]} */ const promises = []; for (const [name, cls] of mapping.entries()) { if (!cls) continue; /**@type {Promise} */ let promise; if (Array.isArray(cls)) { promise = new Promise(async (resolve, reject) => { let e; for (const c of cls) { if (c === null) { // If null, we resolve it immediately, meaning the relevant // class was not found, but it is optional. resolve(null); return; } try { resolve(await c.from_pretrained(model, pretrainedOptions)); return; } catch (err) { if (err.message?.includes('Unsupported model type')) { // If the error is due to an unsupported model type, we // save the error and try the next class. e = err; } else if (err.message?.includes('Could not locate file')) { e = err; } else { reject(err); return; } } } reject(e); }) } else { promise = cls.from_pretrained(model, pretrainedOptions); } result[name] = promise; promises.push(promise); } // Wait for all promises to resolve (in parallel) await Promise.all(promises); // Then assign to result for (const [name, promise] of Object.entries(result)) { result[name] = await promise; } return result; }
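// Usage sketch for the `pipeline()` factory defined above (a minimal example, not an
// exhaustive reference; the concrete label/score values shown are illustrative only):
//
//   import { pipeline } from '@huggingface/transformers';
//
//   // 'sentiment-analysis' is resolved via TASK_ALIASES to 'text-classification', and,
//   // since no model is given, the default from SUPPORTED_TASKS is loaded
//   // (logging "No model specified. Using default model: ...").
//   const classifier = await pipeline('sentiment-analysis');
//   const output = await classifier('I love transformers!');
//   // e.g. [{ label: 'POSITIVE', score: 0.99... }]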
transformers.js/src/pipelines.js/0
{ "file_path": "transformers.js/src/pipelines.js", "repo_id": "transformers.js", "token_count": 57285 }
318
import { spawnSync } from "child_process"; const MODULE_NAME = "@huggingface/transformers"; const CODE_BODY = ` const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; const generator = await pipeline("text-generation", model_id, { dtype: "fp32" }); const result = await generator("hello", { max_new_tokens: 3, return_full_text: false }); process.stdout.write(result[0].generated_text); `; const TARGET_OUTPUT = "erdingsdelete mely"; const wrap_async_iife = (code) => `(async function() { ${code} })();`; const check = (code, module = false) => { const args = ["-e", code]; if (module) args.push("--input-type=module"); const { status, stdout, stderr } = spawnSync("node", args); expect(stderr.toString()).toEqual(""); // No warnings or errors are printed expect(stdout.toString()).toEqual(TARGET_OUTPUT); // The output should match expect(status).toEqual(0); // The process should exit cleanly }; describe("Testing the bundle", () => { it("ECMAScript Module (ESM)", () => { check(`import { pipeline } from "${MODULE_NAME}";${CODE_BODY}`, true); }); it("CommonJS (CJS) with require", () => { check(`const { pipeline } = require("${MODULE_NAME}");${wrap_async_iife(CODE_BODY)}`); }); it("CommonJS (CJS) with dynamic import", () => { check(`${wrap_async_iife(`const { pipeline } = await import("${MODULE_NAME}");${CODE_BODY}`)}`); }); });
transformers.js/tests/bundles.test.js/0
{ "file_path": "transformers.js/tests/bundles.test.js", "repo_id": "transformers.js", "token_count": 498 }
319
import { AutoFeatureExtractor, ClapFeatureExtractor } from "../../../src/transformers.js";

import { load_cached_audio } from "../../asset_cache.js";
import { MAX_FEATURE_EXTRACTOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";

export default () => {
  // ClapFeatureExtractor
  describe("ClapFeatureExtractor", () => {
    const model_id = "Xenova/clap-htsat-unfused";

    /** @type {ClapFeatureExtractor} */
    let feature_extractor;
    beforeAll(async () => {
      feature_extractor = await AutoFeatureExtractor.from_pretrained(model_id);
    }, MAX_FEATURE_EXTRACTOR_LOAD_TIME);

    it(
      "truncation",
      async () => {
        const audio = await load_cached_audio("mlk");

        // Since truncation uses a random strategy, we override
        // Math.random to ensure that the test is deterministic
        const originalRandom = Math.random;
        Math.random = () => 0.5;

        let long_audio = new Float32Array(500000);
        long_audio.set(audio);
        long_audio.set(audio, long_audio.length - audio.length);

        const { input_features } = await feature_extractor(long_audio);
        const { dims, data } = input_features;
        expect(dims).toEqual([1, 1, 1001, 64]);

        expect(input_features.mean().item()).toBeCloseTo(-37.94569396972656);
        expect(data[0]).toBeCloseTo(-53.32647705078125);
        expect(data[1]).toBeCloseTo(-47.76755142211914);
        expect(data[65]).toBeCloseTo(-36.32261276245117);
        expect(data[1002]).toBeCloseTo(-28.0314884185791);
        expect(data[10000]).toBeCloseTo(-21.905902862548828);
        expect(data[60000]).toBeCloseTo(-14.877863883972168);
        expect(data[64062]).toBeCloseTo(-37.9784049987793);
        expect(data[64063]).toBeCloseTo(-37.73963928222656);

        // Reset Math.random
        Math.random = originalRandom;
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "padding",
      async () => {
        const audio = await load_cached_audio("mlk");
        const { input_features } = await feature_extractor(audio);
        const { data, dims } = input_features;

        expect(dims).toEqual([1, 1, 1001, 64]);

        expect(input_features.mean().item()).toBeCloseTo(-34.99049377441406);
        expect(data[0]).toBeCloseTo(-21.32573890686035);
        expect(data[1]).toBeCloseTo(-26.168411254882812);
        expect(data[65]).toBeCloseTo(-29.716018676757812);
        expect(data[1002]).toBeCloseTo(-32.16273498535156);
        expect(data[10000]).toBeCloseTo(-19.9283390045166);

        // padded values
        expect(data[60000]).toBeCloseTo(-100.0);
        expect(data[64062]).toBeCloseTo(-100.0);
        expect(data[64063]).toBeCloseTo(-100.0);
      },
      MAX_TEST_EXECUTION_TIME,
    );
  });
};
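// Standalone usage sketch (same checkpoint as the tests above; the padded
// [1, 1, 1001, 64] shape is taken from this extractor's behaviour as asserted in the
// "padding" test, and the 48 kHz mono input format is an assumption about the CLAP
// feature extractor rather than something re-verified here):
//
//   const fe = await AutoFeatureExtractor.from_pretrained("Xenova/clap-htsat-unfused");
//   const { input_features } = await fe(monoFloat32Audio); // mono Float32Array samples
//   // input_features.dims -> [1, 1, 1001, 64] (log-mel features, padded/truncated to a fixed length)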
transformers.js/tests/models/clap/test_feature_extraction_clap.js/0
{ "file_path": "transformers.js/tests/models/clap/test_feature_extraction_clap.js", "repo_id": "transformers.js", "token_count": 1203 }
320
import { FalconTokenizer } from "../../../src/tokenizers.js"; import { BASE_TEST_STRINGS, FALCON_TEST_STRINGS } from "../test_strings.js"; export const TOKENIZER_CLASS = FalconTokenizer; export const TEST_CONFIG = { "tiiuae/falcon-7b": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"], ids: [1830, 362, 299, 1836, 42], decoded: "How are you doing?", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["You", "\u0120should", "'", "ve", "\u0120done", "\u0120this"], ids: [1357, 808, 18, 298, 1782, 414], decoded: "You should've done this", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["012", "345", "678", "9", "\u0120", "0", "\u0120", "1", "\u0120", "2", "\u0120", "3", "\u0120", "4", "\u0120", "5", "\u0120", "6", "\u0120", "7", "\u0120", "8", "\u0120", "9", "\u0120", "10", "\u0120", "100", "\u0120", "100", "0"], ids: [24445, 29094, 41583, 36, 204, 27, 204, 28, 204, 29, 204, 30, 204, 31, 204, 32, 204, 33, 204, 34, 204, 35, 204, 36, 204, 696, 204, 1425, 204, 1425, 27], decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u0120", "201", "6", "."], ids: [487, 1438, 398, 9923, 272, 204, 626, 33, 25], decoded: "The company was founded in 2016.", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\u010a", "'", "ll", "\u0120", "!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'", "t", "."], ids: [44, 193, 18, 567, 204, 1409, 534, 12493, 79, 7544, 79, 275, 23, 418, 18, 95, 25], decoded: "A\n'll!!to?'d''d of, can't.", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["def", "\u0120main", "():", "\u010a", "\u0109", "pass"], ids: [3071, 1316, 13160, 193, 192, 5412], decoded: "def main():\n\tpass", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "\u0120a", "\u0120", "=", "\u0120obj", ".", "toString", "();", "\u010a", "toString", "();"], ids: [1025, 241, 204, 40, 13756, 25, 19409, 2032, 193, 19409, 2032], decoded: "let a = obj.toString();\ntoString();", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."], ids: [1182, 193, 193, 259, 193, 76, 193, 4780, 25], decoded: "This\n\nis\na\ntest.", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "want", "\u00c3\u00a9d", ",", "running"], ids: [4000, 32108, 5706, 23, 27386], decoded: "UNwant\u00e9d,running", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0100", "2", "\u00ef\u00bf", "\u00bd", "3"], ids: [28, 186, 29, 13112, 133, 30], decoded: "1\u00002\ufffd3", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["Hello", "\u0120World"], ids: [9856, 2889], decoded: "Hello World", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["hello", "\u0120world"], ids: [30835, 1079], decoded: "hello world", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [32725, 1105, 15498, 8061, 233, 2364], decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["\u0120\u0120", "\u0120leading", "\u0120space"], ids: [258, 3736, 2151], decoded: " leading space", }, TRAILING_SPACE: { 
text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["tra", "iling", "\u0120space", "\u0120\u0120\u0120"], ids: [9172, 4447, 2151, 466], decoded: "trailing space ", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["Hi", "\u0120", "\u0120Hello"], ids: [5516, 204, 23090], decoded: "Hi Hello", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "\u0120", "$", "1", "\u0120R", "2", "\u0120", "#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2\u0124", "\u00a3", "7", "\u0120\u00e2\u0124", "\u00b9", "8", "\u0120\u00e2\u0124", "\u00b1", "9", "\u0120test"], ids: [4780, 204, 15, 28, 382, 29, 204, 14, 30, 6471, 31, 5131, 32, 3068, 110, 33, 25631, 108, 34, 25631, 129, 35, 25631, 121, 36, 1318], decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120", "$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."], ids: [52, 5659, 267, 12381, 312, 204, 15, 28, 25, 527, 388, 248, 2946, 25], decoded: "I bought an apple for $1.00 at the store.", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "\u00e2\u0122\u00a6", "\u0120\u0120"], ids: [5667, 898, 258], decoded: "you\u2026 ", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"], ids: [5667, 898, 60482], decoded: "you\u2026\u00a0\u00a0", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142", "\u00c2\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"], ids: [5667, 898, 4381, 4381, 5667, 898, 60482], decoded: "you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["we", "ird", "\u0120", "\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120", "\u00ef", "\u00bd", "\u0140", "\u0120case"], ids: [698, 1505, 204, 181, 133, 236, 5753, 204, 181, 133, 236, 1494], decoded: "weird \uff5e edge \uff5e case", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."], ids: [13856, 207, 1182, 26607, 207, 259, 26607, 207, 76, 26607, 207, 4780, 26607, 207, 25], decoded: "\u2581This \u2581is \u2581a \u2581test \u2581.", }, NUMBERS_SPLIT: { text: FALCON_TEST_STRINGS.NUMBERS_SPLIT, tokens: ["12", "\u0120and", "\u0120", "123", "\u0120and", "\u0120", "123", "4"], ids: [928, 273, 204, 10963, 273, 204, 10963, 31], decoded: "12 and 123 and 1234", }, }, "tiiuae/falcon-rw-1b": { SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120this"], ids: [1639, 815, 1053, 1760, 428], decoded: "You should've done this", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["01", "23", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"], ids: [486, 1954, 2231, 3134, 4531, 657, 352, 362, 513, 604, 642, 718, 767, 807, 860, 838, 1802, 8576], decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000", }, TEXT_WITH_NUMBERS: { text: 
BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."], ids: [464, 1664, 373, 9393, 287, 1584, 13], decoded: "The company was founded in 2016.", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."], ids: [32, 198, 1183, 37867, 1462, 8348, 67, 7061, 67, 286, 11, 460, 470, 13], decoded: "A\n'll!!to?'d''d of, can't.", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "to", "String", "();", "\u010a", "to", "String", "();"], ids: [1616, 257, 796, 26181, 13, 1462, 10100, 9783, 198, 1462, 10100, 9783], decoded: "let a = obj.toString();\ntoString();", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "want", "\u00c3\u00a9", "d", ",", "running"], ids: [4944, 42949, 2634, 67, 11, 20270], decoded: "UNwant\u00e9d,running", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"], ids: [16, 188, 17, 4210, 18], decoded: "1\u00002\ufffd3", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u00e7\u0136\u0141", "\u00e6", "\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8", "\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [37955, 162, 112, 119, 21410, 40367, 253, 164, 108, 249, 42468], decoded: "\u751f\u6d3b\u7684\u771f\u8c1b\u662f", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"], ids: [220, 220, 3756, 2272], decoded: " leading space", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"], ids: [9535, 4386, 2272, 220, 220, 220], decoded: "trailing space ", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"], ids: [9288, 720, 16, 371, 17, 1303, 18, 10432, 19, 4248, 20, 38221, 21, 2343, 224, 96, 22, 2343, 224, 117, 23, 2343, 224, 109, 24, 1332], decoded: "test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."], ids: [40, 5839, 281, 17180, 329, 720, 16, 13, 405, 379, 262, 3650, 13], decoded: "I bought an apple for $1.00 at the store.", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"], ids: [5832, 1399, 220, 220], decoded: "you\u2026 ", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"], ids: [732, 1447, 27332, 121, 252, 5743, 27332, 121, 252, 1339], decoded: "weird \uff5e edge \uff5e case", }, NUMBERS_SPLIT: { text: FALCON_TEST_STRINGS.NUMBERS_SPLIT, tokens: ["12", "\u0120and", "\u0120123", "\u0120and", "\u012012", "34"], ids: [1065, 290, 17031, 290, 1105, 2682], decoded: "12 and 123 and 1234", }, }, };
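// Usage sketch (mirrors the SIMPLE case for "tiiuae/falcon-7b" above; the expected ids
// are the ones listed in that entry):
//
//   const tokenizer = await FalconTokenizer.from_pretrained("tiiuae/falcon-7b");
//   const { input_ids } = tokenizer("How are you doing?");
//   // token ids: [1830, 362, 299, 1836, 42]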
transformers.js/tests/models/falcon/test_tokenization_falcon.js/0
{ "file_path": "transformers.js/tests/models/falcon/test_tokenization_falcon.js", "repo_id": "transformers.js", "token_count": 6152 }
321
import { AutoProcessor, full, GroundingDinoProcessor } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { const model_id = "hf-internal-testing/tiny-random-GroundingDinoForObjectDetection"; describe("GroundingDinoProcessor", () => { /** @type {GroundingDinoProcessor} */ let processor; let images = {}; beforeAll(async () => { processor = await AutoProcessor.from_pretrained(model_id); images = { white_image: await load_cached_image("white_image"), }; }, MAX_PROCESSOR_LOAD_TIME); it( "Single image & text", async () => { const { input_ids, pixel_values } = await processor(images.white_image, "a cat."); expect(input_ids.dims).toEqual([1, 5]); expect(pixel_values.dims).toEqual([1, 3, 800, 800]); }, MAX_TEST_EXECUTION_TIME, ); it( "post_process_grounded_object_detection", async () => { const outputs = { logits: full([1, 900, 256], 0.5), pred_boxes: full([1, 900, 4], 0.5), }; const inputs = { input_ids: full([1, 5], 1n), }; const results = processor.post_process_grounded_object_detection(outputs, inputs.input_ids, { box_threshold: 0.3, text_threshold: 0.3, target_sizes: [[360, 240]], }); const { scores, boxes, labels } = results[0]; expect(scores).toHaveLength(900); expect(boxes).toHaveLength(900); expect(labels).toHaveLength(900); expect(boxes[0]).toEqual([60, 90, 180, 270]); expect(scores[0]).toBeCloseTo(0.622459352016449, 6); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/grounding_dino/test_processor_grounding_dino.js/0
{ "file_path": "transformers.js/tests/models/grounding_dino/test_processor_grounding_dino.js", "repo_id": "transformers.js", "token_count": 821 }
322
import { LlamaTokenizer, MistralForCausalLM } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("MistralForCausalLM", () => { const model_id = "hf-internal-testing/tiny-random-MistralForCausalLM"; /** @type {MistralForCausalLM} */ let model; /** @type {LlamaTokenizer} */ let tokenizer; beforeAll(async () => { model = await MistralForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await LlamaTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([[1n, 6312n, 28709n, 24704n, 8732n, 1310n, 9808n, 13771n, 27309n, 4779n]]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = tokenizer(["hello", "hello world"], { padding: true }); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([ [2n, 1n, 6312n, 28709n, 24704n, 8732n, 1310n, 9808n, 13771n, 27309n], [1n, 6312n, 28709n, 1526n, 8687n, 5690n, 1770n, 30811n, 12501n, 3325n], ]); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/mistral/test_modeling_mistral.js/0
{ "file_path": "transformers.js/tests/models/mistral/test_modeling_mistral.js", "repo_id": "transformers.js", "token_count": 768 }
323
import { PatchTSMixerModel, PatchTSMixerForPrediction, Tensor } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { const dims = [64, 512, 7]; const prod = dims.reduce((a, b) => a * b, 1); const past_values = new Tensor( "float32", Float32Array.from({ length: prod }, (_, i) => i / prod), dims, ); describe("PatchTSMixerModel", () => { const model_id = "hf-internal-testing/tiny-random-PatchTSMixerModel"; /** @type {PatchTSMixerModel} */ let model; beforeAll(async () => { model = await PatchTSMixerModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it( "default", async () => { const { last_hidden_state } = await model({ past_values }); const { num_input_channels, num_patches, d_model } = model.config; expect(last_hidden_state.dims).toEqual([dims[0], num_input_channels, num_patches, d_model]); expect(last_hidden_state.mean().item()).toBeCloseTo(0.03344963490962982, 5); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("PatchTSMixerForPrediction", () => { const model_id = "onnx-community/granite-timeseries-patchtsmixer"; /** @type {PatchTSMixerForPrediction} */ let model; beforeAll(async () => { model = await PatchTSMixerForPrediction.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it( "default", async () => { const { prediction_outputs } = await model({ past_values }); const { prediction_length, num_input_channels } = model.config; expect(prediction_outputs.dims).toEqual([dims[0], prediction_length, num_input_channels]); expect(prediction_outputs.mean().item()).toBeCloseTo(0.5064773559570312, 5); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/patchtsmixer/test_modeling_patchtsmixer.js/0
{ "file_path": "transformers.js/tests/models/patchtsmixer/test_modeling_patchtsmixer.js", "repo_id": "transformers.js", "token_count": 887 }
324
import { pipeline, BackgroundRemovalPipeline, RawImage } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; import { load_cached_image } from "../asset_cache.js"; const PIPELINE_ID = "background-removal"; export default () => { describe("Background Removal", () => { describe("Portrait Segmentation", () => { const model_id = "Xenova/modnet"; /** @type {BackgroundRemovalPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of BackgroundRemovalPipeline", () => { expect(pipe).toBeInstanceOf(BackgroundRemovalPipeline); }); it( "single", async () => { const image = await load_cached_image("portrait_of_woman"); const output = await pipe(image); expect(output).toHaveLength(1); expect(output[0]).toBeInstanceOf(RawImage); expect(output[0].width).toEqual(image.width); expect(output[0].height).toEqual(image.height); expect(output[0].channels).toEqual(4); // With alpha channel }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("Selfie Segmentation", () => { const model_id = "onnx-community/mediapipe_selfie_segmentation"; /** @type {BackgroundRemovalPipeline } */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it( "single", async () => { const image = await load_cached_image("portrait_of_woman"); const output = await pipe(image); expect(output).toHaveLength(1); expect(output[0]).toBeInstanceOf(RawImage); expect(output[0].width).toEqual(image.width); expect(output[0].height).toEqual(image.height); expect(output[0].channels).toEqual(4); // With alpha channel }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); };
transformers.js/tests/pipelines/test_pipelines_background_removal.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_background_removal.js", "repo_id": "transformers.js", "token_count": 1023 }
325
import { pipeline, TextToAudioPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; const PIPELINE_ID = "text-to-audio"; export default () => { describe("Text to Audio", () => { const model_id = "Xenova/tiny-random-vits"; /** @type {TextToAudioPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of TextToAudioPipeline", () => { expect(pipe).toBeInstanceOf(TextToAudioPipeline); }); it( "default", async () => { const output = await pipe("hello"); expect(output.audio).toHaveLength(6400); // NOTE: The mean value is not deterministic, so we just check the first few digits expect(output.audio.reduce((a, b) => a + b, 0) / output.audio.length).toBeCloseTo(-0.0125, 2); expect(output.sampling_rate).toEqual(16000); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/pipelines/test_pipelines_text_to_audio.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_text_to_audio.js", "repo_id": "transformers.js", "token_count": 492 }
326
import { Tensor, cat, stack, layer_norm, ones_like, zeros_like, full_like, rand, std_mean } from "../../src/transformers.js"; import { init } from "../init.js"; import { compare } from "../test_utils.js"; init(); describe("Tensor operations", () => { describe("cat", () => { it("should concatenate on dim=0", () => { const t1 = new Tensor("float32", [1, 2, 3], [1, 3]); const t2 = new Tensor("float32", [4, 5, 6, 7, 8, 9], [2, 3]); const t3 = new Tensor("float32", [10, 11, 12], [1, 3]); const target1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9], [3, 3]); const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 3]); // 2 tensors const concatenated1 = cat([t1, t2], 0); compare(concatenated1, target1, 1e-3); // 3 tensors const concatenated2 = cat([t1, t2, t3], 0); compare(concatenated2, target2, 1e-3); }); it("should concatenate on dim=1", () => { const t1 = new Tensor("float32", [1, 2, 3, -1, -2, -3], [2, 3, 1]); const t2 = new Tensor("float32", [4, -4], [2, 1, 1]); const t3 = new Tensor("float32", [5, 6, -5, -6], [2, 2, 1]); const target1 = new Tensor("float32", [1, 2, 3, 4, -1, -2, -3, -4], [2, 4, 1]); const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, -1, -2, -3, -4, -5, -6], [2, 6, 1]); // 2 tensors const concatenated1 = cat([t1, t2], 1); compare(concatenated1, target1, 1e-3); // 3 tensors const concatenated2 = cat([t1, t2, t3], 1); compare(concatenated2, target2, 1e-3); }); it("should concatenate on dim=-2", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16], [2, 1, 3, 2]); const t2 = new Tensor("float32", [7, 8, 9, 10, 17, 18, 19, 20], [2, 1, 2, 2]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [2, 1, 5, 2]); const concatenated = cat([t1, t2], -2); compare(concatenated, target, 1e-3); }); // TODO add tests for errors }); describe("slice", () => { it("should return a given row dim", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(1); const target = new Tensor("float32", [3, 4], [2]); compare(t2, target); }); it("should return a range of rows", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice([1, 3]); const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]); compare(t2, target); }); it("should return a crop", () => { const t1 = new Tensor( "float32", Array.from({ length: 28 }, (_, i) => i + 1), [4, 7], ); const t2 = t1.slice([1, -1], [1, -1]); const target = new Tensor("float32", [9, 10, 11, 12, 13, 16, 17, 18, 19, 20], [2, 5]); compare(t2, target); }); it("should return the whole tensor when all indices are null/unset", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(); compare(t2, t1); }); it("should return the whole dimension when index is null", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(null); compare(t2, t1); }); it("should slice from index to end when [start, null] is used", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice([1, null]); const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]); compare(t2, target); }); it("should slice from beginning to index when [null, end] is used", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice([null, 2]); const target = new Tensor("float32", [1, 2, 3, 4], [2, 2]); compare(t2, target); }); it("should handle [null, null] as full slice", () => { const t1 = new 
Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice([null, null]); compare(t2, t1); }); it("should select a single element when a number is used in slice", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(2, 1); const target = new Tensor("float32", [6], []); compare(t2, target); }); it("should select a single row when a number is used in slice", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(0); const target = new Tensor("float32", [1, 2], [2]); compare(t2, target); }); it("should select a single column when a number is used in slice", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(null, 1); const target = new Tensor("float32", [2, 4, 6], [3]); compare(t2, target); }); it("should handle negative indices in slice", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(-1); const target = new Tensor("float32", [5, 6], [2]); compare(t2, target); }); }); describe("stack", () => { const t1 = new Tensor("float32", [0, 1, 2, 3, 4, 5], [1, 3, 2]); it("should stack on dim=0", () => { const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [2, 1, 3, 2]); const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [3, 1, 3, 2]); // 2 tensors const stacked1 = stack([t1, t1], 0); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], 0); compare(stacked2, target2, 1e-3); }); it("should stack on dim=1", () => { const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 2, 3, 2]); const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 3, 3, 2]); // 2 tensors const stacked1 = stack([t1, t1], 1); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], 1); compare(stacked2, target2, 1e-3); }); it("should stack on dim=-1", () => { const target1 = new Tensor("float32", [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5], [1, 3, 2, 2]); const target2 = new Tensor("float32", [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5], [1, 3, 2, 3]); // 2 tensors const stacked1 = stack([t1, t1], -1); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], -1); compare(stacked2, target2, 1e-3); }); }); describe("permute", () => { it("should permute", () => { const x = new Tensor("float32", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [2, 3, 4]); // Permute axes to (0, 1, 2) - No change const permuted_1 = x.permute(0, 1, 2); const target_1 = x; compare(permuted_1, target_1, 1e-3); // Permute axes to (0, 2, 1) const permuted_2 = x.permute(0, 2, 1); const target_2 = new Tensor("float32", [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23], [2, 4, 3]); compare(permuted_2, target_2, 1e-3); // Permute axes to (1, 0, 2) const permuted_3 = x.permute(1, 0, 2); const target_3 = new Tensor("float32", [0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23], [3, 2, 4]); compare(permuted_3, target_3, 1e-3); // Permute axes to (1, 2, 0) const permuted_4 = x.permute(1, 2, 0); const target_4 = new Tensor("float32", [0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23], [3, 4, 2]); compare(permuted_4, target_4, 1e-3); // Permute axes to (2, 0, 1) const permuted_5 = x.permute(2, 0, 1); const target_5 = new Tensor("float32", [0, 4, 8, 12, 
16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23], [4, 2, 3]); compare(permuted_5, target_5, 1e-3); // Permute axes to (2, 1, 0) const permuted_6 = x.permute(2, 1, 0); const target_6 = new Tensor("float32", [0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23], [4, 3, 2]); compare(permuted_6, target_6, 1e-3); }); }); describe("map", () => { it("should double", () => { const original = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [2, 4, 6, 8, 10, 12], [2, 3]); const doubled = original.map((x) => x * 2); compare(doubled, target, 1e-3); }); }); describe("mean", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3, 1]); it("should calculate mean over the entire tensor", () => { const target = new Tensor("float32", [3.5], []); compare(t1.mean(), target, 1e-3); }); it("should calculate mean over dimension 0", () => { const target0 = new Tensor("float32", [2.5, 3.5, 4.5], [3, 1]); compare(t1.mean(0), target0, 1e-3); }); it("should calculate mean over dimension 1", () => { const target1 = new Tensor("float32", [2, 5], [2, 1]); compare(t1.mean(1), target1, 1e-3); }); it("should calculate mean over dimension -1", () => { const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); compare(t1.mean(-1), target2, 1e-3); }); }); describe("std_mean", () => { it("should return std_mean for the entire tensor", () => { const t = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const [stdVal, meanVal] = std_mean(t); compare(stdVal, new Tensor("float32", [1.8708287477493286], []), 1e-3); compare(meanVal, new Tensor("float32", [3.5], []), 1e-3); }); }); describe("min", () => { it("should return the minimum over the entire tensor", () => { const t1 = new Tensor("float32", [3, -2, 5, 0], [2, 2]); const target = new Tensor("float32", [-2], []); const result = t1.min(); compare(result, target, 1e-3); }); it("should return the minimum over dimension 1", () => { const t2 = new Tensor("float32", [4, 2, -1, 0, 6, 5], [3, 2]); const target = new Tensor("float32", [2, -1, 5], [3]); const result = t2.min(1); compare(result, target, 1e-3); }); }); describe("max", () => { it("should return the maximum over the entire tensor", () => { const t1 = new Tensor("float32", [3, 10, -2, 7], [2, 2]); const target = new Tensor("float32", [10], []); const result = t1.max(); compare(result, target, 1e-3); }); it("should return the maximum over dimension 0", () => { const t2 = new Tensor("float32", [1, 2, 4, 5, 9, 3], [3, 2]); const target = new Tensor("float32", [9, 5], [2]); const result = t2.max(0); compare(result, target, 1e-3); }); }); describe("sum", () => { it("should calculate sum over entire tensor", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [21], []); const result = t1.sum(); compare(result, target, 1e-3); }); it("should calculate sum over dimension 0", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [5, 7, 9], [3]); const result = t1.sum(0); compare(result, target, 1e-3); }); it("should calculate sum over dimension 1", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [6, 15], [2]); const result = t1.sum(1); compare(result, target, 1e-3); }); }); describe("norm", () => { it("should calculate L2 norm over entire tensor", () => { const t1 = new Tensor("float32", [3, 4], [2]); const target = new Tensor("float32", [5], []); const 
result = t1.norm(); compare(result, target, 1e-3); }); it("should calculate L2 norm over dimension 0", () => { const t1 = new Tensor("float32", [3, 4, 6, 8], [2, 2]); const target = new Tensor("float32", [6.7082, 8.9443], [2]); const result = t1.norm(2, 0); compare(result, target, 1e-2); }); }); describe("normalize", () => { it("should normalize a vector correctly", () => { const t1 = new Tensor("float32", [3, 4], [1, 2]); const target = new Tensor("float32", [0.6, 0.8], [1, 2]); const normalized = t1.normalize(); compare(normalized, target, 1e-3); }); it("should normalize along dimension", () => { const t1 = new Tensor("float32", [1, 2, 2, 3], [2, 2]); const target = new Tensor("float32", [0.4472, 0.8944, 0.5547, 0.8321], [2, 2]); const normalized = t1.normalize(); compare(normalized, target, 1e-3); }); }); describe("layer_norm", () => { it("should calculate layer norm", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [-1.2247356176376343, 0.0, 1.2247356176376343, -1.2247357368469238, -1.1920928955078125e-7, 1.2247354984283447], [2, 3]); const norm = layer_norm(t1, [t1.dims.at(-1)]); compare(norm, target, 1e-3); }); }); describe("sigmoid", () => { it("should apply the sigmoid function to each element in the tensor", () => { const t1 = new Tensor("float32", [0, 1, -1, 5, -5], [5]); const target = new Tensor("float32", [0.5, 1 / (1 + Math.exp(-1)), 1 / (1 + Math.exp(1)), 1 / (1 + Math.exp(-5)), 1 / (1 + Math.exp(5))], [5]); const result = t1.sigmoid(); compare(result, target, 1e-3); }); }); describe("tolist", () => { it("should return nested arrays for a 2D tensor", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const arr = t1.tolist(); compare(arr, [ [1, 2], [3, 4], ]); }); }); describe("mul", () => { it("should multiply constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [2, 4, 6, 8], [2, 2]); const result = t1.mul(2); compare(result, target, 1e-3); }); }); describe("div", () => { it("should divide constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [0.5, 1, 1.5, 2], [2, 2]); const result = t1.div(2); compare(result, target, 1e-3); }); }); describe("add", () => { it("should add constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]); const result = t1.add(2); compare(result, target, 1e-3); }); }); describe("sub", () => { it("should subtract constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [-1, 0, 1, 2], [2, 2]); const result = t1.sub(2); compare(result, target, 1e-3); }); }); describe("gt", () => { it("should perform element-wise greater than comparison with a scalar", () => { const t1 = new Tensor("float32", [1, 5, 3, 7], [4]); const target = new Tensor("bool", [0, 1, 0, 1], [4]); const result = t1.gt(4); compare(result, target, 1e-3); }); }); describe("lt", () => { it("should perform element-wise less than comparison with a scalar", () => { const t1 = new Tensor("float32", [1, 5, 3, 7], [4]); const target = new Tensor("bool", [1, 0, 1, 0], [4]); const result = t1.lt(4); compare(result, target, 1e-3); }); }); describe("squeeze", () => { it("should remove all dimensions of size 1", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 4]); const target = new Tensor("float32", [1, 2, 3, 4], [4]); const result = t1.squeeze(); compare(result, target, 
1e-3); }); it("should remove a specified dimension", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 2]); const result = t1.squeeze(1); const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]); compare(result, target, 1e-3); }); it("should remove multiple dimensions", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 1, 2]); const result = t1.squeeze([0, 3]); const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]); compare(result, target, 1e-3); }); }); describe("unsqueeze", () => { it("should add a dimension at the specified axis", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [4]); const target = new Tensor("float32", [1, 2, 3, 4], [1, 4]); const result = t1.unsqueeze(0); compare(result, target, 1e-3); }); }); describe("flatten", () => { it("should flatten a 2D tensor into 1D by default", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [6]); const result = t1.flatten(); compare(result, target, 1e-3); }); }); describe("neg", () => { it("should compute the negative of each element in the tensor", () => { const t1 = new Tensor("float32", [1, -2, 0, 3], [4]); const target = new Tensor("float32", [-1, 2, -0, -3], [4]); const result = t1.neg(); compare(result, target, 1e-3); }); }); describe("view", () => { it("should reshape the tensor to the specified dimensions", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const result = t1.view(3, 2); compare(result, target, 1e-3); }); it("should reshape the tensor with an inferred dimension (-1)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [1, 6]); const result = t1.view(1, -1); compare(result, target, 1e-3); }); it("should throw if multiple inferred dimensions are used", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); expect(() => t1.view(-1, -1)).toThrow(); }); }); describe("clamp", () => { it("should clamp values between min and max", () => { const t1 = new Tensor("float32", [-2, -1, 0, 1, 2, 3], [6]); const target = new Tensor("float32", [-1, -1, 0, 1, 2, 2], [6]); const result = t1.clamp(-1, 2); compare(result, target, 1e-3); }); }); describe("round", () => { it("should round elements to the nearest integer", () => { const t1 = new Tensor("float32", [0.1, 1.4, 2.5, 3.9, -1.2], [5]); const target = new Tensor("float32", [0, 1, 3, 4, -1], [5]); const result = t1.round(); compare(result, target, 1e-3); }); }); describe("ones_like", () => { it("should create a tensor of all ones with the same shape as the input", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = ones_like(t1); const target = new Tensor("int64", [1n, 1n, 1n, 1n], [2, 2]); compare(result, target, 1e-3); }); }); describe("zeros_like", () => { it("should create a tensor of all zeros with the same shape as the input", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = zeros_like(t1); const target = new Tensor("int64", [0n, 0n, 0n, 0n], [2, 2]); compare(result, target, 1e-3); }); }); describe("full_like", () => { it("should create a tensor filled with a number, matching the shape of the original", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = full_like(t1, 10); const target = new Tensor("float32", [10, 10, 10, 10], [2, 2]); compare(result, target, 1e-3); }); it("should create a 
boolean tensor with the same shape", () => { const t2 = new Tensor("bool", [true, false], [2]); const result = full_like(t2, true); const target = new Tensor("bool", [true, true], [2]); compare(result, target, 1e-3); }); it("should create a bigint tensor with the same shape", () => { const t3 = new Tensor("int64", [1n, 2n], [2]); const result = full_like(t3, 123n); const target = new Tensor("int64", [123n, 123n], [2]); compare(result, target, 1e-3); }); }); describe("rand", () => { it("should create a tensor of random values between 0 and 1 with the given shape", () => { const shape = [2, 2]; const random = rand(shape); expect(random.type).toBe("float32"); expect(random.dims).toEqual(shape); random.data.forEach((val) => { expect(val).toBeGreaterThanOrEqual(0); expect(val).toBeLessThan(1); }); }); }); describe("to", () => { it("float32 to int32 (number to number)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("int32", [1, 2, 3, 4, 5, 6], [2, 3]); const t2 = t1.to("int32"); compare(t2, target); }); it("float32 to int64 (number to bigint)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]); const t2 = t1.to("int64"); compare(t2, target); }); it("int64 to float32 (bigint to number)", () => { const t1 = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const t2 = t1.to("float32"); compare(t2, target); }); it("int32 to uint32", () => { const t1 = new Tensor("int32", [-1, 2, -3, 4, -5, 6], [2, 3]); const target = new Tensor("uint32", [4294967295, 2, 4294967293, 4, 4294967291, 6], [2, 3]); const t2 = t1.to("uint32"); compare(t2, target); }); it("int16 to int8 (overflow)", () => { const t1 = new Tensor("int16", [0, 1, 128, 256, 257, 512], [2, 3]); const target = new Tensor("int8", [0, 1, -128, 0, 1, 0], [2, 3]); const t2 = t1.to("int8"); compare(t2, target); }); }); });
transformers.js/tests/utils/tensor.test.js/0
{ "file_path": "transformers.js/tests/utils/tensor.test.js", "repo_id": "transformers.js", "token_count": 10111 }
327
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Run benchmark using the `optimum-benchmark` library with some customization in `transformers`. Assume we are under `transformers` root directory: (make sure the commits are valid commits) ```bash python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun ``` """ import argparse import glob import json import os.path import re import tempfile from contextlib import contextmanager from pathlib import Path from git import Repo from huggingface_hub import HfApi from optimum_benchmark import Benchmark from optimum_benchmark_wrapper import main PATH_TO_REPO = Path(__file__).parent.parent.resolve() @contextmanager def checkout_commit(repo: Repo, commit_id: str): """ Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager. """ current_head = repo.head.commit if repo.head.is_detached else repo.head.ref try: repo.git.checkout(commit_id) yield finally: repo.git.checkout(current_head) def summarize(run_dir, metrics, expand_metrics=False): """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`. Each summary's format is as follows (for `expand_metrics=False`): ``` { "model": "google/gemma-2b", "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7", "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5", "metrics": { "decode.latency.mean": 1.624666809082031, "per_token.latency.mean": 0.012843788806628804, "per_token.throughput.value": 77.85864553330948 } } ``` """ reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True) report_dirs = [str(Path(report).parent) for report in reports] summaries = [] for report_dir in report_dirs: commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0] if not os.path.isfile(os.path.join(report_dir, "benchmark.json")): continue benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json")) report = benchmark.report model = benchmark.config.backend["model"] # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.) 
benchmark_name = re.sub(f"backend.model={model},*", "", report_dir) benchmark_name = str(Path(benchmark_name).parts[-1]) if benchmark_name.startswith("commit="): benchmark_name = benchmark.config.name metrics_values = {} # post-processing of report: show a few selected/important metric for metric in metrics: keys = metric.split(".") value = report.to_dict() current = metrics_values for key in keys: # Avoid KeyError when a user's specified metric has typo. # TODO: Give warnings. if key not in value: continue value = value[key] if expand_metrics: if isinstance(value, dict): if key not in current: current[key] = {} current = current[key] else: current[key] = value if not expand_metrics: metrics_values[metric] = value # show some config information print(f"model: {model}") print(f"commit: {commit}") print(f"config: {benchmark_name}") if len(metrics_values) > 0: print("metrics:") if expand_metrics: print(metrics_values) else: for metric, value in metrics_values.items(): print(f" - {metric}: {value}") print("-" * 80) summary = { "model": model, "commit": commit, "config": benchmark_name, "metrics": metrics_values, } summaries.append(summary) with open(os.path.join(report_dir, "summary.json"), "w") as fp: json.dump(summary, fp, indent=4) return summaries def combine_summaries(summaries): """Combine a list of summary obtained from the function `summarize`. The combined summary's format is as follows: ``` "google/gemma-2b": { "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.624666809082031} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": {"decode.latency.mean": 1.6278163452148438} } }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} } } } ``` """ combined = {} for summary in summaries: model = summary["model"] config = summary["config"] commit = summary["commit"] if model not in combined: combined[model] = {} if config not in combined[model]: combined[model][config] = {} if commit not in combined[model][config]: combined[model][config][commit] = {"metrics": summary["metrics"]} with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp: json.dump(combined, fp, indent=4) print(json.dumps(combined, indent=4)) return combined if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.") parser.add_argument("--config-name", type=str, required=True, help="The config name.") # arguments specific to this wrapper for our own customization parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.") parser.add_argument( "--commit", type=list_str, default="", help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. 
If `diff` is specified, it will run on both the current head and the `main` branch.", ) parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.") parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.") parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.") parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).") args, optimum_benchmark_args = parser.parse_known_args() repo = Repo(PATH_TO_REPO) metrics = [ "prefill.latency.mean", "prefill.throughput.value", "decode.latency.mean", "decode.throughput.value", "per_token.latency.mean", "per_token.throughput.value", ] if args.metrics is not None: metrics = args.metrics.split(",") # Get `backend.model` in a hacky way: We want to control the experiment flow manually. models = [""] for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("backend.model="): models = arg[len("backend.model=") :] models = models.split(",") break optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")] # Get the commit(s) current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref) commits = [x for x in args.commit if x != ""] if len(commits) == 0: commits = [current_head] elif len(commits) == 1 and commits[0] == "diff": # compare to `main` commits = ["main", current_head] # Get the specified run directory run_dir_arg_idx, run_dir = -1, None sweep_dir_arg_idx, sweep_dir = -1, None for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("hydra.run.dir="): run_dir = arg[len("hydra.run.dir=") :] run_dir_arg_idx = idx elif arg.startswith("hydra.sweep.dir="): sweep_dir = arg[len("hydra.sweep.dir=") :] sweep_dir_arg_idx = idx exp_run_dir, arg_dix, arg_name = ( (sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir") if "--multirun" in optimum_benchmark_args else (run_dir, run_dir_arg_idx, "hydra.run.dir") ) # TODO: not hardcoded if exp_run_dir is None and args.ensure_empty: exp_run_dir = "_benchmark" if args.ensure_empty: os.makedirs(exp_run_dir, exist_ok=True) exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir) run_summaries = [] for commit in commits: with checkout_commit(repo, commit): commit = str(repo.head.commit) commit_run_dir = exp_run_dir if exp_run_dir is not None: commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}") print(f"Run benchmark on commit: {commit}") for model in models: model_arg = [f"backend.model={model}"] if model != "" else [] dir_args = [] if commit_run_dir is not None: if arg_dix > -1: optimum_benchmark_args[arg_dix] = f"{arg_name}={commit_run_dir}" else: dir_args = [ f"hydra.sweep.dir={commit_run_dir}", f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}", ] main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args) if commit_run_dir is not None: # Need to remove the `\` character summaries = summarize(commit_run_dir.replace("\\", ""), metrics) run_summaries.extend(summaries) # aggregate the information across the commits if exp_run_dir is not None: with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp: json.dump(run_summaries, fp, indent=4) combined_summary = combine_summaries(run_summaries) if args.repo_id is not None and args.path_in_repo is not None: # Upload to Hub api = HfApi() api.upload_folder( folder_path=exp_run_dir, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type="dataset", 
token=args.token, )
transformers/benchmark/benchmark.py/0
{ "file_path": "transformers/benchmark/benchmark.py", "repo_id": "transformers", "token_count": 5439 }
328
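The `summarize` and `combine_summaries` helpers in the script above document the nested layout of the JSON files they write (model → config → commit → `{"metrics": {...}}`). As a rough, hedged sketch of how that combined output might be consumed afterwards, the snippet below loads a `summary.json` and prints one metric per commit; the file path, metric name, and commit layout are taken from the docstrings above, while the concrete path is a hypothetical placeholder rather than the output of any particular run.

```python
import json

# Hypothetical path: `combine_summaries` writes `summary.json` inside the
# experiment run directory (a temporary folder such as `_benchmark/tmpXXXXXX`).
with open("_benchmark/tmp1234/summary.json") as fp:
    combined = json.load(fp)

# Compare one metric across commits, following the nested
# model -> config -> commit -> {"metrics": {...}} layout documented above.
metric = "decode.latency.mean"
for model, configs in combined.items():
    for config, commits in configs.items():
        print(f"{model} | {config}")
        for commit, payload in commits.items():
            print(f"  {commit[:7]}: {metric} = {payload['metrics'].get(metric)}")
```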
FROM python:3.9-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root

RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]"
RUN uv pip uninstall transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
transformers/docker/jax-light.dockerfile/0
{ "file_path": "transformers/docker/jax-light.dockerfile", "repo_id": "transformers", "token_count": 224 }
329
FROM nvidia/cuda:12.6.0-cudnn-devel-ubuntu22.04
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

# If set to nothing, will install the latest version
ARG PYTORCH='2.8.0'
ARG TORCH_VISION=''
ARG TORCH_AUDIO=''
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu126'

RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video]

# Install torch stuff after ./transformers[dev-torch,testing,video], otherwise torch may be resolved to a previous
# version.
RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='$TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='$TORCH_AUDIO'.*' || VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA

RUN python3 -m pip uninstall -y tensorflow flax

RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
RUN python3 -m pip install -U "itsdangerous<2.1.0"

# `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs
RUN python3 -m pip uninstall -y kernels

# When installing in editable mode, `transformers` is not recognized as a package.
# This line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
transformers/docker/transformers-pytorch-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-gpu/Dockerfile", "repo_id": "transformers", "token_count": 684 }
330
# BERTology يُشهد في الآونة الأخيرة نمو مجال دراسي يُعنى باستكشاف آلية عمل نماذج المحولات الضخمة مثل BERT (والذي يُطلق عليها البعض اسم "BERTology"). ومن الأمثلة البارزة على هذا المجال ما يلي: - BERT Rediscovers the Classical NLP Pipeline بواسطة Ian Tenney و Dipanjan Das و Ellie Pavlick: https://huggingface.co/papers/1905.05950 - Are Sixteen Heads Really Better than One? بواسطة Paul Michel و Omer Levy و Graham Neubig: https://huggingface.co/papers/1905.10650 - What Does BERT Look At? An Analysis of BERT's Attention بواسطة Kevin Clark و Urvashi Khandelwal و Omer Levy و Christopher D. Manning: https://huggingface.co/papers/1906.04341 - CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://huggingface.co/papers/2210.04633 لإثراء هذا المجال الناشئ، قمنا بتضمين بعض الميزات الإضافية في نماذج BERT/GPT/GPT-2 للسماح للناس بالوصول إلى التمثيلات الداخلية، والتي تم تكييفها بشكل أساسي من العمل الرائد لـ Paul Michel (https://huggingface.co/papers/1905.10650): - الوصول إلى جميع الحالات المخفية في BERT/GPT/GPT-2، - الوصول إلى جميع أوزان الانتباه لكل رأس في BERT/GPT/GPT-2، - استرجاع قيم ومشتقات مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://huggingface.co/papers/1905.10650. ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) أثناء استخراج المعلومات وتقليص من نموذج تم تدريبه مسبقًا على GLUE.
transformers/docs/source/ar/bertology.md/0
{ "file_path": "transformers/docs/source/ar/bertology.md", "repo_id": "transformers", "token_count": 973 }
331
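The BERTology note above describes introspection features added to BERT/GPT/GPT-2-style models: access to all hidden states, access to every head's attention weights, and head pruning based on importance scores. Since the note itself contains no code, here is a minimal, hedged sketch of what using those hooks can look like with the `transformers` API; the checkpoint name, input sentence, and pruned head indices are arbitrary illustrations rather than values from the referenced `run_bertology.py` example.

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Arbitrary example checkpoint; any BERT/GPT/GPT-2-style checkpoint behaves the same way.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained(
    "bert-base-uncased",
    output_hidden_states=True,  # expose every layer's hidden states
    output_attentions=True,     # expose per-layer, per-head attention weights
)

inputs = tokenizer("BERTology studies what attention heads learn.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(len(outputs.hidden_states))   # embeddings + one tensor per layer, each (batch, seq_len, hidden_size)
print(outputs.attentions[0].shape)  # (batch, num_heads, seq_len, seq_len) for the first layer

# Head pruning, as used in the head-importance analysis referenced above:
# remove heads 0 and 2 of layer 0 (indices are purely illustrative).
model.prune_heads({0: [0, 2]})
```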
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # نمذجة اللغة السببية (Causal language modeling) [[open-in-colab]] هناك نوعان من نمذجة اللغة، السببية والمقنعة. يوضح هذا الدليل نمذجة اللغة السببية. تُستخدم نماذج اللغة السببية غالبًا لتوليد النص. يمكنك استخدام هذه النماذج للتطبيقات الإبداعية مثل اختيار مغامرة النص الخاصة بك أو مساعد ترميز ذكي مثل Copilot أو CodeParrot. <Youtube id="Vpjb1lu0MDk"/> تتنبأ نمذجة اللغة السببية بالرمز التالي في تسلسل من الرموز، ولا يمكن للنموذج سوى الاهتمام بالرموز على اليسار. هذا يعني أن النموذج لا يمكنه رؤية الرموز المستقبلية. GPT-2 هو مثال على نموذج اللغة السببية. سيوضح لك هذا الدليل كيفية: 1. ضبط دقيق [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) على مجموعة فرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5). 2. استخدام النموذج المدرب الخاص بك للاستنتاج. <Tip> لرؤية جميع العمارات ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالتحقق من [task-page](https://huggingface.co/tasks/text-generation) </Tip> قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية: ```bash pip install transformers datasets evaluate ``` نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عند المطالبة، أدخل رمزك لتسجيل الدخول: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## تحميل مجموعة بيانات ELI5 ابدأ بتحميل أول 5000 مثال من [ELI5-Category](https://huggingface.co/datasets/eli5_category) مجموعة البيانات مع مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة. ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5_category", split="train[:5000]") ``` قم بتقسيم مجموعة بيانات `train` إلى مجموعتي تدريب واختبار باستخدام الخاصية [`~datasets.Dataset.train_test_split`]: ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` ثم ألق نظرة على مثال: ```py >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. 
- Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'score': [21, 19, 5, 3], 'text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]}, 'title_urls': ['url'], 'selftext_urls': ['url']} ``` على الرغم من أن هذا قد يبدو معقدًا، إلا أنك مهتم حقًا بحقل `text`. ما هو رائع حول مهام نمذجة اللغة أنت لا تحتاج إلى تسميات (تُعرف أيضًا باسم المهمة غير الخاضعة للإشراف) لأن الكلمة التالية تعمل كتسمية. ## معالجة مسبقة (Preprocess) <Youtube id="ma1TrR7gE7I"/> الخطوة التالية هي تحميل مجزء النص DistilGPT2 لمعالجة حقل `text` الفرعي: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") ``` ستلاحظ من المثال أعلاه، الحقل `text` هو في الواقع متداخل داخل `answers`. هذا يعني أنك ستحتاج إلى استخراج حقل `text` الفرعي من بنيته المتداخلة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten): ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'answers.score': [21, 19, 5, 3], 'answers.text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']], 'title_urls': ['url'], 'selftext_urls': ['url']} ``` كل حقل فرعي هو الآن عموداً منفصلاً مسبوقاً بـ `answers`، وحقل `text` هو قائمة الآن. بدلاً من ذلك من تجزائة نص كل جملة بشكل منفصل، قم بتحويل القائمة إلى سلسلة حتى تتمكن من تجزئة نصها بشكل مجمّع. هنا أول دالة معالجة مسبقة لدمج قائمة السلاسل لكل مثال ومجزىء النتيجة: ```py >>> def preprocess_function(examples): ... 
return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` لتطبيق دالة المعالجة المسبقة هذه على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع هذه العملية `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد، وزيادة عدد العمليات مع `num_proc`. احذف أي أعمدة لا تحتاجها: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` تحتوي هذه المجموعة من البيانات على تسلسلات الرموز، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج. يمكنك الآن استخدام دالة ما قبل المعالجة ثانية لـ: - تجميع كل التسلسلات. - تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة، بحجم `block_size`، والتي يجب أن تكون أقصر من الطول الأقصى للمدخلات ومناسبة لذاكرة GPU. ```py >>> block_size = 128 >>> def group_texts(examples): ... # ربط جميع النصوص. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # نتجاهل الباقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك ... # تخصيص هذا الجزء حسب احتياجاتك. ... if total_length >= block_size: ... total_length = (total_length // block_size) * block_size ... # التقسيم إلى أجزاء بحجم block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... result["labels"] = result["input_ids"].copy() ... return result ``` طبق دالة `group_texts` على كامل المجموعة من البيانات: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأفضل أن تقوم بـ *الحشو الديناميكي* للجمل إلى الطول الأطول في الدفعة أثناء التجميع، بدلاً من حشو كامل المجموعة من البيانات إلى الطول الأقصى. <frameworkcontent> <pt> استخدم رمز نهاية التسلسل كرمز للحشو، وحدد `mlm_probability` لحجب الرموز بشكل عشوائي عند كل تكرار للبيانات: ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) ``` </pt> <tf> استخدم رمز نهاية التسلسل كرمز للحشو، وحدد `mlm_probability` لحجب الرموز بشكل عشوائي عند كل تكرار للبيانات: ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf") ``` </tf> </frameworkcontent> ## التدريب (Train) <frameworkcontent> <pt> <Tip> إذا لم تكن على دراية بتدريب نموذج باستخدام [`Trainer`], اطلع على [البرنامج التعليمي الأساسي](../training#train-with-pytorch-trainer)! </Tip> أنت جاهز الآن لبدء تدريب نموذجك! قم بتحميل DistilGPT2 باستخدام [`AutoModelForCausalLM`]: ```py >>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` في هذه المرحلة، تبقى ثلاث خطوات فقط: 1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد أين سيتم حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub بتحديد `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك). 2. قم بتمرير معاملات التدريب إلى [`Trainer`] إلى جانب النموذج، والمجموعات من البيانات، ومجمّع البيانات. 3. قم باستدعاء [`~Trainer.train`] لتدريب نموذجك. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_clm-model", ... eval_strategy="epoch", ... 
learning_rate=2e-5, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... tokenizer=tokenizer, ... ) >>> trainer.train() ``` بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم نموذجك والحصول على احتمالية الارتباك: ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 49.61 ``` ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> إذا لم تكن على دراية بتدريب نموذج باستخدام Keras، اطلع على [البرنامج التعليمي الأساسي](../training#train-a-tensorflow-model-with-keras)! </Tip> لتدريب نموذج في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معاملات التدريب: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` ثم يمكنك تحميل DistilGPT2 باستخدام [`TFAutoModelForCausalLM`]: ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` حول مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة الافتراضية، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # لا يوجد حجة للخسارة! ``` يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك ومجمّع البيانات في [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_clm-model", ... tokenizer=tokenizer, ... ) ``` أخيراً، أنت جاهز لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق من الصحة، وعدد العصور، والتعليقات الخاصة بك لتدريب النموذج: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه! </tf> </frameworkcontent> <Tip> للحصول على مثال أكثر تعمقًا حول كيفية تدريب نموذج للنمذجة اللغوية السببية، اطلع على الدفتر المقابل [دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). </Tip> ## الاستدلال (Inference) رائع، الآن بعد أن قمت بتدريب نموذج، يمكنك استخدامه للاستدلال! قم بابتكار سؤال تود توليد نص منه: ```py >>> prompt = "Somatic hypermutation allows the immune system to" ``` أبسط طريقة لتجربة نموذجك المدرب للاستدلال هي استخدامه في [`pipeline`]. 
قم بتنفيذ `pipeline` لتوليد النص مع نموذجك، ومرر نصك إليه: ```py >>> from transformers import pipeline >>> generator = pipeline("text-generation", model="username/my_awesome_eli5_clm-model") >>> generator(prompt) [{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}] ``` <frameworkcontent> <pt> قسم النص وإرجع `input_ids` كتنسورات PyTorch: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="pt").input_ids ``` استخدم طريقة [`~generation.GenerationMixin.generate`] لتوليد النص. للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والبارامترات للتحكم في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies). ```py >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model") >>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` فك ترميز الرموز المولدة مرة أخرى إلى نص: ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"] ``` </pt> <tf> قم بتقسيم النص وإرجاع `input_ids` كـ TensorFlow tensors: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="tf").input_ids ``` استخدم طريقة [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] لإنشاء الملخص. للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والبارامترات للتحكم في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies). ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model") >>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` فك ترميز الرموز المولدة مرة أخرى إلى نص: ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for'] ``` </tf> </frameworkcontent>
transformers/docs/source/ar/tasks/language_modeling.md/0
{ "file_path": "transformers/docs/source/ar/tasks/language_modeling.md", "repo_id": "transformers", "token_count": 9379 }
332
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainieren mit einem Skript Neben den 🤗 Transformers [notebooks](./notebooks) gibt es auch Beispielskripte, die zeigen, wie man ein Modell für eine Aufgabe mit [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) oder [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax) trainiert. Sie werden auch Skripte finden, die wir in unseren [Forschungsprojekten](https://github.com/huggingface/transformers-research-projects/) und [Legacy-Beispielen](https://github.com/huggingface/transformers/tree/main/examples/legacy) verwendet haben und die größtenteils von der Community stammen. Diese Skripte werden nicht aktiv gepflegt und erfordern eine bestimmte Version von 🤗 Transformers, die höchstwahrscheinlich nicht mit der neuesten Version der Bibliothek kompatibel ist. Es wird nicht erwartet, dass die Beispielskripte bei jedem Problem sofort funktionieren. Möglicherweise müssen Sie das Skript an das Problem anpassen, das Sie zu lösen versuchen. Um Ihnen dabei zu helfen, legen die meisten Skripte vollständig offen, wie die Daten vorverarbeitet werden, so dass Sie sie nach Bedarf für Ihren Anwendungsfall bearbeiten können. Für jede Funktion, die Sie in einem Beispielskript implementieren möchten, diskutieren Sie bitte im [Forum](https://discuss.huggingface.co/) oder in einem [issue](https://github.com/huggingface/transformers/issues), bevor Sie einen Pull Request einreichen. Wir freuen uns zwar über Fehlerkorrekturen, aber es ist unwahrscheinlich, dass wir einen Pull Request zusammenführen, der mehr Funktionalität auf Kosten der Lesbarkeit hinzufügt. Diese Anleitung zeigt Ihnen, wie Sie ein Beispiel für ein Trainingsskript zur Zusammenfassung in [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) und [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization) ausführen können. Sofern nicht anders angegeben, sollten alle Beispiele mit beiden Frameworks funktionieren. ## Einrichtung Um die neueste Version der Beispielskripte erfolgreich auszuführen, **müssen Sie 🤗 Transformers aus dem Quellcode** in einer neuen virtuellen Umgebung installieren: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` Für ältere Versionen der Beispielskripte klicken Sie auf die Umschalttaste unten: <details> <summary>Beispiele für ältere Versionen von 🤗 Transformers</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> Dann stellen Sie Ihren aktuellen Klon von 🤗 Transformers auf eine bestimmte Version um, z.B. v3.5.1: ```bash git checkout tags/v3.5.1 ``` Nachdem Sie die richtige Bibliotheksversion eingerichtet haben, navigieren Sie zu dem Beispielordner Ihrer Wahl und installieren die beispielspezifischen Anforderungen: ```bash pip install -r requirements.txt ``` ## Ein Skript ausführen <frameworkcontent> <pt> Das Beispielskript lädt einen Datensatz aus der 🤗 [Datasets](https://huggingface.co/docs/datasets/) Bibliothek herunter und verarbeitet ihn vor. Dann nimmt das Skript eine Feinabstimmung eines Datensatzes mit dem [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) auf einer Architektur vor, die eine Zusammenfassung unterstützt. 
Das folgende Beispiel zeigt, wie die Feinabstimmung von [T5-small](https://huggingface.co/google-t5/t5-small) auf dem Datensatz [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) durchgeführt wird. Das T5-Modell benötigt aufgrund der Art und Weise, wie es trainiert wurde, ein zusätzliches Argument `source_prefix`. Mit dieser Eingabeaufforderung weiß T5, dass es sich um eine Zusammenfassungsaufgabe handelt. ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> Das Beispielskript lädt einen Datensatz aus der 🤗 [Datasets](https://huggingface.co/docs/datasets/) Bibliothek herunter und verarbeitet ihn vor. Anschließend nimmt das Skript die Feinabstimmung eines Datensatzes mit Keras auf einer Architektur vor, die die Zusammenfassung unterstützt. Das folgende Beispiel zeigt, wie die Feinabstimmung von [T5-small](https://huggingface.co/google-t5/t5-small) auf dem [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) Datensatz durchgeführt wird. Das T5-Modell benötigt aufgrund der Art und Weise, wie es trainiert wurde, ein zusätzliches Argument `source_prefix`. Mit dieser Eingabeaufforderung weiß T5, dass es sich um eine Zusammenfassungsaufgabe handelt. ```bash python examples/tensorflow/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## Verteiltes Training und gemischte Präzision Der [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) unterstützt verteiltes Training und gemischte Präzision, d.h. Sie können ihn auch in einem Skript verwenden. So aktivieren Sie diese beiden Funktionen: - Fügen Sie das Argument `fp16` hinzu, um gemischte Genauigkeit zu aktivieren. - Legen Sie die Anzahl der zu verwendenden GPUs mit dem Argument `nproc_per_node` fest. ```bash torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` TensorFlow-Skripte verwenden eine [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) für verteiltes Training, und Sie müssen dem Trainingsskript keine zusätzlichen Argumente hinzufügen. Das TensorFlow-Skript verwendet standardmäßig mehrere GPUs, wenn diese verfügbar sind. ## Ein Skript auf einer TPU ausführen <frameworkcontent> <pt> Tensor Processing Units (TPUs) sind speziell für die Beschleunigung der Leistung konzipiert. PyTorch unterstützt TPUs mit dem [XLA](https://www.tensorflow.org/xla) Deep Learning Compiler (siehe [hier](https://github.com/pytorch/xla/blob/master/README.md) für weitere Details). Um eine TPU zu verwenden, starten Sie das Skript `xla_spawn.py` und verwenden das Argument `num_cores`, um die Anzahl der TPU-Kerne festzulegen, die Sie verwenden möchten. 
```bash
python xla_spawn.py --num_cores 8 \
    summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```
</pt>
<tf>
Tensor Processing Units (TPUs) sind speziell für die Beschleunigung der Leistung konzipiert. TensorFlow-Skripte verwenden eine [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) für das Training auf TPUs. Um eine TPU zu verwenden, übergeben Sie den Namen der TPU-Ressource an das Argument `tpu`.

```bash
python run_summarization.py \
    --tpu name_of_tpu_resource \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --num_train_epochs 3 \
    --do_train \
    --do_eval
```
</tf>
</frameworkcontent>

## Führen Sie ein Skript mit 🤗 Accelerate aus

🤗 [Accelerate](https://huggingface.co/docs/accelerate) ist eine reine PyTorch-Bibliothek, die eine einheitliche Methode für das Training eines Modells auf verschiedenen Arten von Setups (nur CPU, mehrere GPUs, TPUs) bietet und dabei die vollständige Transparenz der PyTorch-Trainingsschleife beibehält. Stellen Sie sicher, dass Sie 🤗 Accelerate installiert haben, wenn Sie es nicht bereits haben:

> Hinweis: Da Accelerate schnell weiterentwickelt wird, muss die Git-Version von Accelerate installiert sein, um die Skripte auszuführen.

```bash
pip install git+https://github.com/huggingface/accelerate
```

Anstelle des Skripts `run_summarization.py` müssen Sie das Skript `run_summarization_no_trainer.py` verwenden. Die von Accelerate unterstützten Skripte haben eine Datei `task_no_trainer.py` im Ordner. Beginnen Sie mit dem folgenden Befehl, um eine Konfigurationsdatei zu erstellen und zu speichern:

```bash
accelerate config
```

Testen Sie Ihre Einrichtung, um sicherzustellen, dass sie korrekt konfiguriert ist:

```bash
accelerate test
```

Jetzt sind Sie bereit, das Training zu starten:

```bash
accelerate launch run_summarization_no_trainer.py \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

## Verwenden Sie einen benutzerdefinierten Datensatz

Das Zusammenfassungsskript unterstützt benutzerdefinierte Datensätze, solange es sich um eine CSV- oder JSON-Line-Datei handelt. Wenn Sie Ihren eigenen Datensatz verwenden, müssen Sie mehrere zusätzliche Argumente angeben:

- `train_file` und `validation_file` geben den Pfad zu Ihren Trainings- und Validierungsdateien an.
- `text_column` ist der Eingabetext, der zusammengefasst werden soll.
- `summary_column` ist der auszugebende Zieltext.
Ein Zusammenfassungsskript, das einen benutzerdefinierten Datensatz verwendet, würde wie folgt aussehen:

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --train_file path_to_csv_or_jsonlines_file \
    --validation_file path_to_csv_or_jsonlines_file \
    --text_column text_column_name \
    --summary_column summary_column_name \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate
```

## Testen Sie ein Skript

Es ist oft eine gute Idee, Ihr Skript an einer kleineren Anzahl von Beispielen für Datensätze auszuführen, um sicherzustellen, dass alles wie erwartet funktioniert, bevor Sie sich auf einen ganzen Datensatz festlegen, dessen Fertigstellung Stunden dauern kann. Verwenden Sie die folgenden Argumente, um den Datensatz auf eine maximale Anzahl von Stichproben zu beschränken:

- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --max_train_samples 50 \
    --max_eval_samples 50 \
    --max_predict_samples 50 \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

Nicht alle Beispielskripte unterstützen das Argument `max_predict_samples`. Wenn Sie sich nicht sicher sind, ob Ihr Skript dieses Argument unterstützt, fügen Sie das Argument `-h` hinzu, um dies zu überprüfen:

```bash
examples/pytorch/summarization/run_summarization.py -h
```

## Training vom Kontrollpunkt fortsetzen

Eine weitere hilfreiche Option, die Sie aktivieren können, ist die Wiederaufnahme des Trainings von einem früheren Kontrollpunkt aus. Auf diese Weise können Sie im Falle einer Unterbrechung Ihres Trainings dort weitermachen, wo Sie aufgehört haben, ohne von vorne beginnen zu müssen.

Es gibt zwei Methoden, um das Training von einem Kontrollpunkt aus wieder aufzunehmen.

Die erste Methode verwendet das Argument `output_dir previous_output_dir`, um das Training ab dem letzten in `output_dir` gespeicherten Kontrollpunkt wieder aufzunehmen. In diesem Fall sollten Sie `overwrite_output_dir` entfernen:

```bash
python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --output_dir previous_output_dir \
    --predict_with_generate
```

Die zweite Methode verwendet das Argument `resume_from_checkpoint path_to_specific_checkpoint`, um das Training ab einem bestimmten Checkpoint-Ordner wieder aufzunehmen.
```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` ## Teilen Sie Ihr Modell Alle Skripte können Ihr endgültiges Modell in den [Model Hub](https://huggingface.co/models) hochladen. Stellen Sie sicher, dass Sie bei Hugging Face angemeldet sind, bevor Sie beginnen: ```bash hf auth login ``` Dann fügen Sie dem Skript das Argument `push_to_hub` hinzu. Mit diesem Argument wird ein Repository mit Ihrem Hugging Face-Benutzernamen und dem in `output_dir` angegebenen Ordnernamen erstellt. Wenn Sie Ihrem Repository einen bestimmten Namen geben möchten, fügen Sie ihn mit dem Argument `push_to_hub_model_id` hinzu. Das Repository wird automatisch unter Ihrem Namensraum aufgeführt. Das folgende Beispiel zeigt, wie Sie ein Modell mit einem bestimmten Repository-Namen hochladen können: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ```
transformers/docs/source/de/run_scripts.md/0
{ "file_path": "transformers/docs/source/de/run_scripts.md", "repo_id": "transformers", "token_count": 7505 }
333
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Chat templates

The [chat basics](./conversations) guide covers how to store chat histories and generate text from chat models using [`TextGenerationPipeline`].

This guide is intended for more advanced users, and covers the underlying classes and methods, as well as the key concepts for understanding what's actually going on when you chat with a model.

The critical insight needed to understand chat models is this: All causal LMs, whether chat-trained or not, continue a sequence of tokens. When causal LMs are trained, the training usually begins with "pre-training" on a huge corpus of text, which creates a "base" model. These base models are then often "fine-tuned" for chat, which means training them on data that is formatted as a sequence of messages. The chat is still just a sequence of tokens, though! The list of `role` and `content` dictionaries that you pass to a chat model gets converted to a token sequence, often with control tokens like `<|user|>` or `<|assistant|>` or `<|end_of_message|>`, which allow the model to see the chat structure.

There are many possible chat formats, and different models may use different formats or control tokens, even if they were fine-tuned from the same base model!

Don't panic, though - you don't need to memorize every possible chat format in order to use chat models. Chat models come with **chat templates**, which indicate how they expect chats to be formatted. You can access these with the [`apply_chat_template`] method. Let's see two examples. Both of these models are fine-tuned from the same `Mistral-7B` base model:

<hfoptions id="template">
<hfoption id="Mistral">

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
chat = [
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
    {"role": "user", "content": "I'd like to show off how chat templating works!"},
]

tokenizer.apply_chat_template(chat, tokenize=False)
```

```md
<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]
```

</hfoption>
<hfoption id="Zephyr">

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
chat = [
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
    {"role": "user", "content": "I'd like to show off how chat templating works!"},
]

tokenizer.apply_chat_template(chat, tokenize=False)
```

```md
<|user|>\nHello, how are you?</s>\n<|assistant|>\nI'm doing great.
How can I help you today?</s>\n<|user|>\nI'd like to show off how chat templating works!</s>\n ``` </hfoption> </hfoptions> Mistral-7B-Instruct uses `[INST]` and `[/INST]` tokens to indicate the start and end of user messages, while Zephyr-7B uses `<|user|>` and `<|assistant|>` tokens to indicate speaker roles. This is why chat templates are important - with the wrong control tokens, these models would have drastically worse performance. ## Using `apply_chat_template` The input to `apply_chat_template` should be structured as a list of dictionaries with `role` and `content` keys. The `role` key specifies the speaker, and the `content` key contains the message. The common roles are: - `user` for messages from the user - `assistant` for messages from the model - `system` for directives on how the model should act (usually placed at the beginning of the chat) [`apply_chat_template`] takes this list and returns a formatted sequence. Set `tokenize=True` if you want to tokenize the sequence. ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta", device_map="auto", dtype=torch.bfloat16) messages = [ {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate",}, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` ```md <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` Pass the tokenized chat to [`~GenerationMixin.generate`] to generate a response. ```py outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` ```md <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` > [!WARNING] > Some tokenizers add special `<bos>` and `<eos>` tokens. Chat templates should already include all the necessary special tokens, and adding additional special tokens is often incorrect or duplicated, hurting model performance. When you format text with `apply_chat_template(tokenize=False)`, make sure you set `add_special_tokens=False` if you tokenize later to avoid duplicating these tokens. > This isn’t an issue if you use `apply_chat_template(tokenize=True)`, which means it's usually the safer option! ### add_generation_prompt You may have noticed the [add_generation_prompt](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.add_generation_prompt) argument in the above examples. This argument adds tokens to the end of the chat that indicate the start of an `assistant` response. Remember: Beneath all the chat abstractions, chat models are still just language models that continue a sequence of tokens! 
If you include tokens that tell it that it's now in an `assistant` response, it will correctly write a response, but if you don't include these tokens, the model may get confused and do something strange, like **continuing** the user's message instead of replying to it! Let's see an example to understand what `add_generation_prompt` is actually doing. First, let's format a chat without `add_generation_prompt`: ```py tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) tokenized_chat ``` ```md <|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> ``` Now, let's format the same chat with `add_generation_prompt=True`: ```py tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) tokenized_chat ``` ```md <|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant ``` When `add_generation_prompt=True`, `<|im_start|>assistant` is added at the end to indicate the start of an `assistant` message. This lets the model know an `assistant` response is next. Not all models require generation prompts, and some models, like [Llama](./model_doc/llama), don’t have any special tokens before the `assistant` response. In these cases, [add_generation_prompt](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.add_generation_prompt) has no effect. ### continue_final_message The [continue_final_message](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.continue_final_message) parameter controls whether the final message in the chat should be continued or not instead of starting a new one. It removes end of sequence tokens so that the model continues generation from the final message. This is useful for “prefilling” a model response. In the example below, the model generates text that continues the JSON string rather than starting a new message. It can be very useful for improving the accuracy of instruction following when you know how to start its replies. ```py chat = [ {"role": "user", "content": "Can you format the answer in JSON?"}, {"role": "assistant", "content": '{"name": "'}, ] formatted_chat = tokenizer.apply_chat_template(chat, tokenize=True, return_dict=True, continue_final_message=True) model.generate(**formatted_chat) ``` > [!WARNING] > You shouldn’t use [add_generation_prompt](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.add_generation_prompt) and [continue_final_message](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.continue_final_message) together. The former adds tokens that start a new message, while the latter removes end of sequence tokens. Using them together returns an error. [`TextGenerationPipeline`] sets [add_generation_prompt](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.add_generation_prompt) to `True` by default to start a new message. However, if the final message in the chat has the `assistant` role, it assumes the message is a prefill and switches to `continue_final_message=True`. 
This is because most models don’t support multiple consecutive assistant messages. To override this behavior, explicitly pass the [continue_final_message](https://huggingface.co/docs/transformers/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template.continue_final_message) argument to the pipeline. ## Model training Training a model with a chat template is a good way to ensure the template matches the tokens the model was trained on. Apply the chat template as a preprocessing step to your dataset. Set `add_generation_prompt=False` because the additional tokens to prompt an assistant response aren’t helpful during training. An example of preprocessing a dataset with a chat template is shown below. ```py from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` ```md <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` After this step, you can continue following the [training recipe](./tasks/language_modeling) for causal language models using the `formatted_chat` column.
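As a rough sketch of that next step (reusing the `dataset` and `tokenizer` from the example above; the maximum length and other settings here are illustrative choices, not requirements), the `formatted_chat` column can be tokenized and paired with a causal language modeling collator:

```py
from transformers import DataCollatorForLanguageModeling

# Some chat tokenizers don't define a pad token; fall back to EOS if needed
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

def tokenize_chats(batch):
    # add_special_tokens=False because the chat template already inserted them
    return tokenizer(batch["formatted_chat"], truncation=True, max_length=512, add_special_tokens=False)

tokenized_dataset = dataset.map(tokenize_chats, batched=True, remove_columns=dataset.column_names)

# mlm=False produces causal LM labels (a copy of input_ids; the model shifts them internally)
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
```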
transformers/docs/source/en/chat_templating.md/0
{ "file_path": "transformers/docs/source/en/chat_templating.md", "repo_id": "transformers", "token_count": 3572 }
334
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# GGUF

[GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) is a file format used to store models for inference with [GGML](https://github.com/ggerganov/ggml), a fast and lightweight inference framework written in C and C++. GGUF is a single-file format containing the model metadata and tensors.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/gguf-spec.png"/>
</div>

The GGUF format also supports many quantized data types (refer to the [quantization type table](https://hf.co/docs/hub/en/gguf#quantization-types) for a complete list of supported quantization types) which saves a significant amount of memory, making inference with large models like Whisper and Llama feasible on local and edge devices.

Transformers supports loading models stored in the GGUF format for further training or finetuning. The GGUF checkpoint is **dequantized to fp32** where the full model weights are available and compatible with PyTorch.

> [!TIP]
> Models that support GGUF include Llama, Mistral, Qwen2, Qwen2Moe, Phi3, Bloom, Falcon, StableLM, GPT2, Starcoder2, and [more](https://github.com/huggingface/transformers/blob/main/src/transformers/integrations/ggml.py)

Add the `gguf_file` parameter to [`~PreTrainedModel.from_pretrained`] to specify the GGUF file to load.

```py
# pip install gguf
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
dtype = torch.float32  # could be torch.float16 or torch.bfloat16 too

tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename, dtype=dtype)
```

Once you're done tinkering with the model, save and convert it back to the GGUF format with the [convert_hf_to_gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py) script.

```py
tokenizer.save_pretrained("directory")
model.save_pretrained("directory")

!python ${path_to_llama_cpp}/convert_hf_to_gguf.py ${directory}
```
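Because the GGUF tensors are dequantized when loaded, the in-memory model is typically much larger than the `.gguf` file on disk. A quick sanity check along these lines (a sketch reusing the `model` and `tokenizer` from the snippet above; the prompt is just an example) confirms the dtype and memory footprint, and that the model generates as usual, before you save or fine-tune it:

```py
print(model.dtype)  # the dtype requested in from_pretrained, e.g. torch.float32
print(f"{model.get_memory_footprint() / 1e9:.2f} GB")  # size of the dequantized weights in memory

# The dequantized model behaves like any other Transformers checkpoint
inputs = tokenizer("The GGUF format is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```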
transformers/docs/source/en/gguf.md/0
{ "file_path": "transformers/docs/source/en/gguf.md", "repo_id": "transformers", "token_count": 892 }
335
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Logging

🤗 Transformers has a centralized logging system, so that you can set up the verbosity of the library easily.

Currently the default verbosity of the library is `WARNING`.

To change the level of verbosity, just use one of the direct setters. For instance, here is how to change the verbosity to the INFO level.

```python
import transformers

transformers.logging.set_verbosity_info()
```

You can also use the environment variable `TRANSFORMERS_VERBOSITY` to override the default verbosity. You can set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`, `fatal`. For example:

```bash
TRANSFORMERS_VERBOSITY=error ./myprogram.py
```

Additionally, some `warnings` can be disabled by setting the environment variable `TRANSFORMERS_NO_ADVISORY_WARNINGS` to a true value, like *1*. This will disable any warning that is logged using [`logger.warning_advice`]. For example:

```bash
TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py
```

Here is an example of how to use the same logger as the library in your own module or script:

```python
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers")
logger.info("INFO")
logger.warning("WARN")
```

All the methods of this logging module are documented below; the main ones are [`logging.get_verbosity`] to get the current level of verbosity in the logger and [`logging.set_verbosity`] to set the verbosity to the level of your choice. In order (from the least verbose to the most verbose), those levels (with their corresponding int values in parentheses) are:

- `transformers.logging.CRITICAL` or `transformers.logging.FATAL` (int value, 50): only report the most critical errors.
- `transformers.logging.ERROR` (int value, 40): only report errors.
- `transformers.logging.WARNING` or `transformers.logging.WARN` (int value, 30): only report errors and warnings. This is the default level used by the library.
- `transformers.logging.INFO` (int value, 20): report errors, warnings, and basic information.
- `transformers.logging.DEBUG` (int value, 10): report all information.

By default, `tqdm` progress bars will be displayed during model download. [`logging.disable_progress_bar`] and [`logging.enable_progress_bar`] can be used to disable or re-enable this behavior.

## `logging` vs `warnings`

Python has two logging systems that are often used in conjunction: `logging`, which is explained above, and `warnings`, which allows further classification of warnings in specific buckets, e.g., `FutureWarning` for a feature or path that has already been deprecated and `DeprecationWarning` to indicate an upcoming deprecation.

We use both in the `transformers` library.
We leverage and adapt `logging`'s `captureWarnings` method to allow management of these warning messages by the verbosity setters above.

What does that mean for developers of the library? We should respect the following heuristics:
- `warnings` should be favored for developers of the library and libraries dependent on `transformers`
- `logging` should be used for end-users of the library using it in everyday projects

See the reference of the `captureWarnings` method below.

[[autodoc]] logging.captureWarnings

## Base setters

[[autodoc]] logging.set_verbosity_error

[[autodoc]] logging.set_verbosity_warning

[[autodoc]] logging.set_verbosity_info

[[autodoc]] logging.set_verbosity_debug

## Other functions

[[autodoc]] logging.get_verbosity

[[autodoc]] logging.set_verbosity

[[autodoc]] logging.get_logger

[[autodoc]] logging.enable_default_handler

[[autodoc]] logging.disable_default_handler

[[autodoc]] logging.enable_explicit_format

[[autodoc]] logging.reset_format

[[autodoc]] logging.enable_progress_bar

[[autodoc]] logging.disable_progress_bar
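As a quick illustration of the helpers documented above, a script can silence both the log output and the download progress bars for a quiet batch job, then restore the defaults afterwards:

```python
from transformers.utils import logging

logging.set_verbosity_error()   # only report errors
logging.disable_progress_bar()  # hide tqdm bars during downloads

# ... run the quiet part of the job here ...

logging.enable_progress_bar()    # show download progress bars again
logging.set_verbosity_warning()  # back to the library default
```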
transformers/docs/source/en/main_classes/logging.md/0
{ "file_path": "transformers/docs/source/en/main_classes/logging.md", "repo_id": "transformers", "token_count": 1276 }
336
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

*This model was released on 2022-11-12 and added to Hugging Face Transformers on 2023-01-04.*

<div style="float: right;">
    <div class="flex flex-wrap space-x-1">
        <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
    </div>
</div>

# AltCLIP

[AltCLIP](https://huggingface.co/papers/2211.06679) replaces the [CLIP](./clip) text encoder with a multilingual XLM-R encoder and aligns image and text representations with teacher learning and contrastive learning.

You can find all the original AltCLIP checkpoints under the [AltClip](https://huggingface.co/collections/BAAI/alt-clip-diffusion-66987a97de8525205f1221bf) collection.

> [!TIP]
> Click on the AltCLIP models in the right sidebar for more examples of how to apply AltCLIP to different tasks.

The example below demonstrates how to calculate similarity scores between an image and one or more captions with the [`AutoModel`] class.

<hfoptions id="usage">
<hfoption id="AutoModel">

```python
import torch
import requests
from PIL import Image
from transformers import AltCLIPModel, AltCLIPProcessor

model = AltCLIPModel.from_pretrained("BAAI/AltCLIP", dtype=torch.bfloat16)
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

outputs = model(**inputs)
logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities

labels = ["a photo of a cat", "a photo of a dog"]
for label, prob in zip(labels, probs[0]):
    print(f"{label}: {prob.item():.4f}")
```

</hfoption>
</hfoptions>

Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.

The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
```python # !pip install torchao import torch import requests from PIL import Image from transformers import AltCLIPModel, AltCLIPProcessor, TorchAoConfig model = AltCLIPModel.from_pretrained( "BAAI/AltCLIP", quantization_config=TorchAoConfig("int4_weight_only", group_size=128), dtype=torch.bfloat16, ) processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities labels = ["a photo of a cat", "a photo of a dog"] for label, prob in zip(labels, probs[0]): print(f"{label}: {prob.item():.4f}") ``` ## Notes - AltCLIP uses bidirectional attention instead of causal attention and it uses the `[CLS]` token in XLM-R to represent a text embedding. - Use [`CLIPImageProcessor`] to resize (or rescale) and normalize images for the model. - [`AltCLIPProcessor`] combines [`CLIPImageProcessor`] and [`XLMRobertaTokenizer`] into a single instance to encode text and prepare images. ## AltCLIPConfig [[autodoc]] AltCLIPConfig ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPModel [[autodoc]] AltCLIPModel ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel ## AltCLIPVisionModel [[autodoc]] AltCLIPVisionModel ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor
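Beyond similarity scoring, the two encoders can also be used on their own to produce standalone embeddings, for example for multilingual text-image retrieval. The snippet below is a minimal sketch of that pattern using the CLIP-style `get_text_features` and `get_image_features` methods of [`AltCLIPModel`]; the caption and sample image are just convenient choices for demonstration:

```python
import torch
import requests
from PIL import Image
from transformers import AltCLIPModel, AltCLIPProcessor

model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

with torch.no_grad():
    text_inputs = processor(text=["a photo of a cat"], padding=True, return_tensors="pt")
    text_embeds = model.get_text_features(**text_inputs)

    image_inputs = processor(images=image, return_tensors="pt")
    image_embeds = model.get_image_features(**image_inputs)

# Normalize, then a dot product gives the cosine similarity
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
print(f"cosine similarity: {(text_embeds @ image_embeds.T).item():.4f}")
```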
transformers/docs/source/en/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/en/model_doc/altclip.md", "repo_id": "transformers", "token_count": 1522 }
337
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

*This model was released on 2020-05-20 and added to Hugging Face Transformers on 2020-11-16.*

<div style="float: right;">
    <div class="flex flex-wrap space-x-1">
        <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
    </div>
</div>

# BERTweet

[BERTweet](https://huggingface.co/papers/2005.10200) shares the same architecture as [BERT-base](./bert), but it's pretrained like [RoBERTa](./roberta) on English Tweets. It performs really well on Tweet-related tasks like part-of-speech tagging, named entity recognition, and text classification.

You can find all the original BERTweet checkpoints under the [VinAI Research](https://huggingface.co/vinai?search_models=BERTweet) organization.

> [!TIP]
> Refer to the [BERT](./bert) docs for more examples of how to apply BERTweet to different language tasks.

The example below demonstrates how to predict the `<mask>` token with [`Pipeline`], [`AutoModel`], and from the command line.

<hfoptions id="usage">
<hfoption id="Pipeline">

```py
import torch
from transformers import pipeline

pipeline = pipeline(
    task="fill-mask",
    model="vinai/bertweet-base",
    dtype=torch.float16,
    device=0
)
pipeline("Plants create <mask> through a process known as photosynthesis.")
```

</hfoption>
<hfoption id="AutoModel">

```py
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "vinai/bertweet-base",
)
model = AutoModelForMaskedLM.from_pretrained(
    "vinai/bertweet-base",
    dtype=torch.float16,
    device_map="auto"
)
inputs = tokenizer("Plants create <mask> through a process known as photosynthesis.", return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model(**inputs)
    predictions = outputs.logits

masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1]
predicted_token_id = predictions[0, masked_index].argmax(dim=-1)
predicted_token = tokenizer.decode(predicted_token_id)

print(f"The predicted token is: {predicted_token}")
```

</hfoption>
<hfoption id="transformers CLI">

```bash
echo -e "Plants create <mask> through a process known as photosynthesis." | transformers-cli run --task fill-mask --model vinai/bertweet-base --device 0
```

</hfoption>
</hfoptions>

## Notes

- Use the [`AutoTokenizer`] or [`BertweetTokenizer`] because it's preloaded with a custom vocabulary adapted to tweet-specific tokens like hashtags (#), mentions (@), emojis, and common abbreviations. Make sure to also install the [emoji](https://pypi.org/project/emoji/) library.
- Inputs should be padded on the right (`padding="max_length"`) because BERT uses absolute position embeddings.

## BertweetTokenizer

[[autodoc]] BertweetTokenizer
transformers/docs/source/en/model_doc/bertweet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bertweet.md", "repo_id": "transformers", "token_count": 1111 }
338
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

*This model was released on 2021-03-11 and added to Hugging Face Transformers on 2021-06-30.*

<div style="float: right;">
    <div class="flex flex-wrap space-x-1">
        <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
    </div>
</div>

# CANINE

[CANINE](https://huggingface.co/papers/2103.06874) is a tokenization-free Transformer. It skips the usual step of splitting text into subwords or wordpieces and processes text character by character. That means it works directly with raw Unicode, making it especially useful for languages with complex or inconsistent tokenization rules and even noisy inputs like typos. Since working with characters means handling longer sequences, CANINE uses a smart trick. The model compresses the input early on (called downsampling) so the transformer doesn't have to process every character individually. This keeps things fast and efficient.

You can find all the original CANINE checkpoints under the [Google](https://huggingface.co/google?search_models=canine) organization.

> [!TIP]
> Click on the CANINE models in the right sidebar for more examples of how to apply CANINE to different language tasks.

The example below demonstrates how to generate embeddings with [`Pipeline`], [`AutoModel`], and from the command line.

<hfoptions id="usage">
<hfoption id="Pipeline">

```py
import torch
from transformers import pipeline

pipeline = pipeline(
    task="feature-extraction",
    model="google/canine-c",
    device=0,
)

pipeline("Plants create energy through a process known as photosynthesis.")
```

</hfoption>
<hfoption id="AutoModel">

```py
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("google/canine-c")

text = "Plants create energy through a process known as photosynthesis."
input_ids = torch.tensor([[ord(char) for char in text]])

outputs = model(input_ids)
pooled_output = outputs.pooler_output
sequence_output = outputs.last_hidden_state
```

</hfoption>
<hfoption id="transformers CLI">

```bash
echo -e "Plants create energy through a process known as photosynthesis." | transformers-cli run --task feature-extraction --model google/canine-c --device 0
```

</hfoption>
</hfoptions>

## Notes

- CANINE skips tokenization entirely; it works directly on raw characters, not subwords. You can use it with or without a tokenizer. For batched inference and training, it is recommended to use the tokenizer to pad and truncate all sequences to the same length.
    ```py
    from transformers import AutoTokenizer, AutoModel

    tokenizer = AutoTokenizer.from_pretrained("google/canine-c")
    model = AutoModel.from_pretrained("google/canine-c")

    inputs = ["Life is like a box of chocolates.", "You never know what you gonna get."]
    # pad to the longest sequence in the batch and truncate to the model's maximum length
    encoding = tokenizer(inputs, padding="longest", truncation=True, return_tensors="pt")
    outputs = model(**encoding)
    ```

- CANINE is primarily designed to be fine-tuned on a downstream task. The pretrained model can be used for either masked language modeling or next sentence prediction.

## CanineConfig

[[autodoc]] CanineConfig

## CanineTokenizer

[[autodoc]] CanineTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences

## CANINE specific outputs

[[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling

## CanineModel

[[autodoc]] CanineModel
    - forward

## CanineForSequenceClassification

[[autodoc]] CanineForSequenceClassification
    - forward

## CanineForMultipleChoice

[[autodoc]] CanineForMultipleChoice
    - forward

## CanineForTokenClassification

[[autodoc]] CanineForTokenClassification
    - forward

## CanineForQuestionAnswering

[[autodoc]] CanineForQuestionAnswering
    - forward
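To make the fine-tuning note above concrete, here is a minimal sketch of putting a classification head on top of the pretrained encoder. The label count and example texts are placeholders chosen for illustration, not part of the original checkpoint:

```py
import torch
from transformers import AutoTokenizer, CanineForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/canine-c")
model = CanineForSequenceClassification.from_pretrained("google/canine-c", num_labels=2)

texts = ["I loved this movie!", "This was a waste of time."]
labels = torch.tensor([1, 0])

encoding = tokenizer(texts, padding="longest", truncation=True, return_tensors="pt")
outputs = model(**encoding, labels=labels)
print(outputs.loss)  # loss used to train the newly initialized classification head
```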
transformers/docs/source/en/model_doc/canine.md/0
{ "file_path": "transformers/docs/source/en/model_doc/canine.md", "repo_id": "transformers", "token_count": 1325 }
339
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2022-01-10 and added to Hugging Face Transformers on 2022-02-07.* # ConvNeXT <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The ConvNeXT model was proposed in [A ConvNet for the 2020s](https://huggingface.co/papers/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. ConvNeXT is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers, that claims to outperform them. The abstract from the paper is the following: *The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.jpg" alt="drawing" width="600"/> <small> ConvNeXT architecture. Taken from the <a href="https://huggingface.co/papers/2201.03545">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/ConvNeXt). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ConvNeXT. 
<PipelineTag pipeline="image-classification"/> - [`ConvNextForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ConvNextConfig [[autodoc]] ConvNextConfig ## ConvNextFeatureExtractor [[autodoc]] ConvNextFeatureExtractor ## ConvNextImageProcessor [[autodoc]] ConvNextImageProcessor - preprocess ## ConvNextImageProcessorFast [[autodoc]] ConvNextImageProcessorFast - preprocess ## ConvNextModel [[autodoc]] ConvNextModel - forward ## ConvNextForImageClassification [[autodoc]] ConvNextForImageClassification - forward
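For reference, the snippet below is a minimal image-classification sketch with [`ConvNextForImageClassification`]; the `facebook/convnext-tiny-224` checkpoint is only one choice here, and any ConvNeXT checkpoint fine-tuned for classification works the same way.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

# assumed checkpoint; substitute any ConvNeXT classification checkpoint
processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# map the highest-scoring logit back to its ImageNet label
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```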
transformers/docs/source/en/model_doc/convnext.md/0
{ "file_path": "transformers/docs/source/en/model_doc/convnext.md", "repo_id": "transformers", "token_count": 1191 }
340
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> *This model was released on 2023-09-28 and added to Hugging Face Transformers on 2024-12-24.* # DINOv2 with Registers <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The DINOv2 with Registers model was proposed in [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588) by Timothée Darcet, Maxime Oquab, Julien Mairal, Piotr Bojanowski. The [Vision Transformer](vit) (ViT) is a transformer encoder model (BERT-like) originally introduced to do supervised image classification on ImageNet. Next, people figured out ways to make ViT work really well on self-supervised image feature extraction (i.e. learning meaningful features, also called embeddings) on images without requiring any labels. Some example papers here include [DINOv2](dinov2) and [MAE](vit_mae). The authors of DINOv2 noticed that ViTs have artifacts in attention maps. It’s due to the model using some image patches as “registers”. The authors propose a fix: just add some new tokens (called "register" tokens), which you only use during pre-training (and throw away afterwards). This results in: - no artifacts - interpretable attention maps - and improved performances. The abstract from the paper is the following: *Transformers have recently emerged as a powerful tool for learning visual representations. In this paper, we identify and characterize artifacts in feature maps of both supervised and self-supervised ViT networks. The artifacts correspond to high-norm tokens appearing during inference primarily in low-informative background areas of images, that are repurposed for internal computations. We propose a simple yet effective solution based on providing additional tokens to the input sequence of the Vision Transformer to fill that role. We show that this solution fixes that problem entirely for both supervised and self-supervised models, sets a new state of the art for self-supervised visual models on dense visual prediction tasks, enables object discovery methods with larger models, and most importantly leads to smoother feature maps and attention maps for downstream visual processing.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dinov2_with_registers_visualization.png" alt="drawing" width="600"/> <small> Visualization of attention maps of various models trained with vs. without registers. Taken from the <a href="https://huggingface.co/papers/2309.16588">original paper</a>. </small> Tips: - Usage of DINOv2 with Registers is identical to DINOv2 without, you'll just get better performance. 
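As a concrete illustration of that tip, here is a minimal feature-extraction sketch. The checkpoint name `facebook/dinov2-with-registers-base` is an assumption; substitute whichever DINOv2 with Registers checkpoint you actually use.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

checkpoint = "facebook/dinov2-with-registers-base"  # assumed checkpoint name
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# hidden states cover the CLS token, the register tokens and the patch tokens
print(outputs.last_hidden_state.shape)
```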
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/dinov2). ## Dinov2WithRegistersConfig [[autodoc]] Dinov2WithRegistersConfig ## Dinov2WithRegistersModel [[autodoc]] Dinov2WithRegistersModel - forward ## Dinov2WithRegistersForImageClassification [[autodoc]] Dinov2WithRegistersForImageClassification - forward
transformers/docs/source/en/model_doc/dinov2_with_registers.md/0
{ "file_path": "transformers/docs/source/en/model_doc/dinov2_with_registers.md", "repo_id": "transformers", "token_count": 1074 }
341
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-12-08 and added to Hugging Face Transformers on 2022-05-11.* # FLAVA <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The FLAVA model was proposed in [FLAVA: A Foundational Language And Vision Alignment Model](https://huggingface.co/papers/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela and is accepted at CVPR 2022. The paper aims at creating a single unified foundation model which can work across vision, language as well as vision-and-language multimodal tasks. The abstract from the paper is the following: *State-of-the-art vision and vision-and-language models rely on large-scale visio-linguistic pretraining for obtaining good performance on a variety of downstream tasks. Generally, such models are often either cross-modal (contrastive) or multi-modal (with earlier fusion) but not both; and they often only target specific modalities or tasks. A promising direction would be to use a single holistic universal model, as a "foundation", that targets all modalities at once -- a true vision and language foundation model should be good at vision tasks, language tasks, and cross- and multi-modal vision and language tasks. We introduce FLAVA as such a model and demonstrate impressive performance on a wide range of 35 tasks spanning these target modalities.* This model was contributed by [aps](https://huggingface.co/aps). The original code can be found [here](https://github.com/facebookresearch/multimodal/tree/main/examples/flava). ## FlavaConfig [[autodoc]] FlavaConfig ## FlavaTextConfig [[autodoc]] FlavaTextConfig ## FlavaImageConfig [[autodoc]] FlavaImageConfig ## FlavaMultimodalConfig [[autodoc]] FlavaMultimodalConfig ## FlavaImageCodebookConfig [[autodoc]] FlavaImageCodebookConfig ## FlavaProcessor [[autodoc]] FlavaProcessor ## FlavaFeatureExtractor [[autodoc]] FlavaFeatureExtractor ## FlavaImageProcessor [[autodoc]] FlavaImageProcessor - preprocess ## FlavaImageProcessorFast [[autodoc]] FlavaImageProcessorFast - preprocess ## FlavaForPreTraining [[autodoc]] FlavaForPreTraining - forward ## FlavaModel [[autodoc]] FlavaModel - forward - get_text_features - get_image_features ## FlavaImageCodebook [[autodoc]] FlavaImageCodebook - forward - get_codebook_indices - get_codebook_probs ## FlavaTextModel [[autodoc]] FlavaTextModel - forward ## FlavaImageModel [[autodoc]] FlavaImageModel - forward ## FlavaMultimodalModel [[autodoc]] FlavaMultimodalModel - forward
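To make the unimodal feature methods listed above concrete, here is a minimal sketch assuming the `facebook/flava-full` checkpoint:

```python
import requests
from PIL import Image
from transformers import FlavaProcessor, FlavaModel

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
model = FlavaModel.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt", padding=True)

# unimodal embeddings from the image and text encoders
image_features = model.get_image_features(pixel_values=inputs.pixel_values)
text_features = model.get_text_features(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)
print(image_features.shape, text_features.shape)
```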
transformers/docs/source/en/model_doc/flava.md/0
{ "file_path": "transformers/docs/source/en/model_doc/flava.md", "repo_id": "transformers", "token_count": 1040 }
342
<!--Copyright 2025 The ZhipuAI Inc. and The HuggingFace Inc. team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2025-07-28 and added to Hugging Face Transformers on 2025-08-08.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # Glm4vMoe ## Overview Vision-language models (VLMs) have become a key cornerstone of intelligent systems. As real-world AI tasks grow increasingly complex, VLMs urgently need to enhance reasoning capabilities beyond basic multimodal perception — improving accuracy, comprehensiveness, and intelligence — to enable complex problem solving, long-context understanding, and multimodal agents. Through our open-source work, we aim to explore the technological frontier together with the community while empowering more developers to create exciting and innovative applications. [GLM-4.5V](https://huggingface.co/papers/2508.06471) ([Github repo](https://github.com/zai-org/GLM-V)) is based on ZhipuAI’s next-generation flagship text foundation model GLM-4.5-Air (106B parameters, 12B active). It continues the technical approach of [GLM-4.1V-Thinking](https://huggingface.co/papers/2507.01006), achieving SOTA performance among models of the same scale on 42 public vision-language benchmarks. It covers common tasks such as image, video, and document understanding, as well as GUI agent operations. ![bench_45](https://raw.githubusercontent.com/zai-org/GLM-V/refs/heads/main/resources/bench_45v.jpeg) Beyond benchmark performance, GLM-4.5V focuses on real-world usability. Through efficient hybrid training, it can handle diverse types of visual content, enabling full-spectrum vision reasoning, including: - **Image reasoning** (scene understanding, complex multi-image analysis, spatial recognition) - **Video understanding** (long video segmentation and event recognition) - **GUI tasks** (screen reading, icon recognition, desktop operation assistance) - **Complex chart & long document parsing** (research report analysis, information extraction) - **Grounding** (precise visual element localization) The model also introduces a **Thinking Mode** switch, allowing users to balance between quick responses and deep reasoning. This switch works the same as in the `GLM-4.5` language model. 
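A minimal generation sketch is shown below; the checkpoint name (`zai-org/GLM-4.5V`) and the sample image URL are assumptions, so adapt them to the checkpoint you actually load.

```python
import torch
from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration

model_id = "zai-org/GLM-4.5V"  # assumed checkpoint name
processor = AutoProcessor.from_pretrained(model_id)
model = Glm4vMoeForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

generated = model.generate(**inputs, max_new_tokens=128)
# decode only the newly generated tokens
print(processor.batch_decode(generated[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)[0])
```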
## Glm4vMoeConfig [[autodoc]] Glm4vMoeConfig ## Glm4vMoeTextConfig [[autodoc]] Glm4vMoeTextConfig ## Glm4vMoeTextModel [[autodoc]] Glm4vMoeTextModel - forward ## Glm4vMoeModel [[autodoc]] Glm4vMoeModel - forward ## Glm4vMoeForConditionalGeneration [[autodoc]] Glm4vMoeForConditionalGeneration - forward
transformers/docs/source/en/model_doc/glm4v_moe.md/0
{ "file_path": "transformers/docs/source/en/model_doc/glm4v_moe.md", "repo_id": "transformers", "token_count": 1047 }
343
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2024-08-23 and added to Hugging Face Transformers on 2025-02-14.* # GraniteMoeShared ## Overview The GraniteMoe model was proposed in [Power Scheduler: A Batch Size and Token Number Agnostic Learning Rate Scheduler](https://huggingface.co/papers/2408.13359) by Yikang Shen, Matthew Stallone, Mayank Mishra, Gaoyuan Zhang, Shawn Tan, Aditya Prasad, Adriana Meza Soria, David D. Cox and Rameswar Panda. Additionally this class GraniteMoeSharedModel adds shared experts for Moe. ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer model_path = "ibm-research/moe-7b-1b-active-shared-experts" tokenizer = AutoTokenizer.from_pretrained(model_path) # drop device_map if running on CPU model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto") model.eval() # change input text as desired prompt = "Write a code to find the maximum value in a list of numbers." # tokenize the text input_tokens = tokenizer(prompt, return_tensors="pt") # generate output tokens output = model.generate(**input_tokens, max_new_tokens=100) # decode output tokens into text output = tokenizer.batch_decode(output) # loop over the batch to print, in this example the batch size is 1 for i in output: print(i) ``` This HF implementation is contributed by [Mayank Mishra](https://huggingface.co/mayank-mishra), [Shawn Tan](https://huggingface.co/shawntan) and [Sukriti Sharma](https://huggingface.co/SukritiSharma). ## GraniteMoeSharedConfig [[autodoc]] GraniteMoeSharedConfig ## GraniteMoeSharedModel [[autodoc]] GraniteMoeSharedModel - forward ## GraniteMoeSharedForCausalLM [[autodoc]] GraniteMoeSharedForCausalLM - forward
transformers/docs/source/en/model_doc/granitemoeshared.md/0
{ "file_path": "transformers/docs/source/en/model_doc/granitemoeshared.md", "repo_id": "transformers", "token_count": 745 }
344
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-01-19 and added to Hugging Face Transformers on 2024-12-05.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # I-JEPA [I-JEPA](https://huggingface.co/papers/2301.08243) is a self-supervised learning method that learns semantic image representations by predicting parts of an image from other parts of the image. It compares the abstract representations of the image (rather than pixel level comparisons), which avoids the typical pitfalls of data augmentation bias and pixel-level details that don't capture semantic meaning. You can find the original I-JEPA checkpoints under the [AI at Meta](https://huggingface.co/facebook/models?search=ijepa) organization. > [!TIP] > This model was contributed by [jmtzt](https://huggingface.co/jmtzt). <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/ijepa_architecture.jpg"> > Click on the I-JEPA models in the right sidebar for more examples of how to apply I-JEPA to different image representation and classification tasks. The example below demonstrates how to extract image features with [`Pipeline`] or the [`AutoModel`] class. 
<hfoptions id="usage">
<hfoption id="Pipeline">

```py
import torch
from transformers import pipeline

feature_extractor = pipeline(
    task="image-feature-extraction",
    model="facebook/ijepa_vith14_1k",
    device=0,
    dtype=torch.bfloat16
)
features = feature_extractor("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg", return_tensors=True)

print(f"Feature shape: {features.shape}")
```

</hfoption>
<hfoption id="AutoModel">

```py
import requests
import torch
from PIL import Image
from torch.nn.functional import cosine_similarity
from transformers import AutoModel, AutoProcessor

url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
url_2 = "http://images.cocodataset.org/val2017/000000219578.jpg"

image_1 = Image.open(requests.get(url_1, stream=True).raw)
image_2 = Image.open(requests.get(url_2, stream=True).raw)

processor = AutoProcessor.from_pretrained("facebook/ijepa_vith14_1k")
model = AutoModel.from_pretrained("facebook/ijepa_vith14_1k", dtype="auto", attn_implementation="sdpa")

def infer(image):
    inputs = processor(image, return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1)

embed_1 = infer(image_1)
embed_2 = infer(image_2)

similarity = cosine_similarity(embed_1, embed_2)
print(similarity)
```

</hfoption>
</hfoptions>

Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.

The example below uses [bitsandbytes](../quantization/bitsandbytes) to only quantize the weights to 4-bits.

```py
import requests
import torch
from PIL import Image
from torch.nn.functional import cosine_similarity
from transformers import BitsAndBytesConfig, AutoModel, AutoProcessor

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
url_2 = "http://images.cocodataset.org/val2017/000000219578.jpg"

image_1 = Image.open(requests.get(url_1, stream=True).raw)
image_2 = Image.open(requests.get(url_2, stream=True).raw)

processor = AutoProcessor.from_pretrained("facebook/ijepa_vitg16_22k")
model = AutoModel.from_pretrained("facebook/ijepa_vitg16_22k", quantization_config=quantization_config, dtype="auto", attn_implementation="sdpa")

def infer(image):
    inputs = processor(image, return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1)

embed_1 = infer(image_1)
embed_2 = infer(image_2)

similarity = cosine_similarity(embed_1, embed_2)
print(similarity)
```

## IJepaConfig

[[autodoc]] IJepaConfig

## IJepaModel

[[autodoc]] IJepaModel
    - forward

## IJepaForImageClassification

[[autodoc]] IJepaForImageClassification
    - forward
transformers/docs/source/en/model_doc/ijepa.md/0
{ "file_path": "transformers/docs/source/en/model_doc/ijepa.md", "repo_id": "transformers", "token_count": 1806 }
345
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-04-18 and added to Hugging Face Transformers on 2021-11-03.* # LayoutXLM <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview LayoutXLM was proposed in [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://huggingface.co/papers/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. It's a multilingual extension of the [LayoutLMv2 model](https://huggingface.co/papers/2012.14740) trained on 53 languages. The abstract from the paper is the following: *Multimodal pre-training with text, layout, and image has achieved SOTA performance for visually-rich document understanding tasks recently, which demonstrates the great potential for joint learning across different modalities. In this paper, we present LayoutXLM, a multimodal pre-trained model for multilingual document understanding, which aims to bridge the language barriers for visually-rich document understanding. To accurately evaluate LayoutXLM, we also introduce a multilingual form understanding benchmark dataset named XFUN, which includes form understanding samples in 7 languages (Chinese, Japanese, Spanish, French, Italian, German, Portuguese), and key-value pairs are manually labeled for each language. Experiment results show that the LayoutXLM model has significantly outperformed the existing SOTA cross-lingual pre-trained models on the XFUN dataset.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm). ## Usage tips and examples One can directly plug in the weights of LayoutXLM into a LayoutLMv2 model, like so: ```python from transformers import LayoutLMv2Model model = LayoutLMv2Model.from_pretrained("microsoft/layoutxlm-base") ``` Note that LayoutXLM has its own tokenizer, based on [`LayoutXLMTokenizer`]/[`LayoutXLMTokenizerFast`]. You can initialize it as follows: ```python from transformers import LayoutXLMTokenizer tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") ``` Similar to LayoutLMv2, you can use [`LayoutXLMProcessor`] (which internally applies [`LayoutLMv2ImageProcessor`] and [`LayoutXLMTokenizer`]/[`LayoutXLMTokenizerFast`] in sequence) to prepare all data for the model. <Tip> As LayoutXLM's architecture is equivalent to that of LayoutLMv2, one can refer to [LayoutLMv2's documentation page](layoutlmv2) for all tips, code examples and notebooks. 
</Tip> ## LayoutXLMTokenizer [[autodoc]] LayoutXLMTokenizer - __call__ - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LayoutXLMTokenizerFast [[autodoc]] LayoutXLMTokenizerFast - __call__ ## LayoutXLMProcessor [[autodoc]] LayoutXLMProcessor - __call__
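For reference, a minimal sketch of preparing model inputs with the processor; the document image path below is a placeholder, and OCR (via `pytesseract`) is applied by default.

```python
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")

# placeholder path to a scanned document image
image = Image.open("document.png").convert("RGB")

# runs OCR to get words + bounding boxes, then tokenizes everything for the model
encoding = processor(image, return_tensors="pt")
print(encoding.keys())
```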
transformers/docs/source/en/model_doc/layoutxlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/layoutxlm.md", "repo_id": "transformers", "token_count": 1077 }
346
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2020-10-02 and added to Hugging Face Transformers on 2021-05-03.* # LUKE <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The LUKE model was proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://huggingface.co/papers/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto. It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps improve performance on various downstream tasks involving reasoning about entities such as named entity recognition, extractive and cloze-style question answering, entity typing, and relation classification. The abstract from the paper is the following: *Entity representations are useful in natural language tasks involving entities. In this paper, we propose new pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed model treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question answering).* This model was contributed by [ikuyamada](https://huggingface.co/ikuyamada) and [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/studio-ousia/luke). ## Usage tips - This implementation is the same as [`RobertaModel`] with the addition of entity embeddings as well as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities. - LUKE treats entities as input tokens; therefore, it takes `entity_ids`, `entity_attention_mask`, `entity_token_type_ids` and `entity_position_ids` as extra input. You can obtain those using [`LukeTokenizer`]. 
- [`LukeTokenizer`] takes `entities` and `entity_spans` (character-based start and end positions of the entities in the input text) as extra input. `entities` typically consist of [MASK] entities or Wikipedia entities. The brief description when inputting these entities are as follows: - *Inputting [MASK] entities to compute entity representations*: The [MASK] entity is used to mask entities to be predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address downstream tasks requiring the information of entities in text such as entity typing, relation classification, and named entity recognition. - *Inputting Wikipedia entities to compute knowledge-enhanced token representations*: LUKE learns rich information (or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as question answering. - There are three head models for the former use case: - [`LukeForEntityClassification`], for tasks to classify a single entity in an input text such as entity typing, e.g. the [Open Entity dataset](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html). This model places a linear head on top of the output entity representation. - [`LukeForEntityPairClassification`], for tasks to classify the relationship between two entities such as relation classification, e.g. the [TACRED dataset](https://nlp.stanford.edu/projects/tacred/). This model places a linear head on top of the concatenated output representation of the pair of given entities. - [`LukeForEntitySpanClassification`], for tasks to classify the sequence of entity spans, such as named entity recognition (NER). This model places a linear head on top of the output entity representations. You can address NER using this model by inputting all possible entity spans in the text to the model. [`LukeTokenizer`] has a `task` argument, which enables you to easily create an input to these head models by specifying `task="entity_classification"`, `task="entity_pair_classification"`, or `task="entity_span_classification"`. Please refer to the example code of each head models. Usage example: ```python >>> from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification >>> model = LukeModel.from_pretrained("studio-ousia/luke-base") >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base") # Example 1: Computing the contextualized entity representation corresponding to the entity mention "Beyoncé" >>> text = "Beyoncé lives in Los Angeles." >>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé" >>> inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**inputs) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations >>> entities = [ ... "Beyoncé", ... "Los Angeles", ... 
] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles" >>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt") >>> outputs = model(**inputs) >>> word_last_hidden_state = outputs.last_hidden_state >>> entity_last_hidden_state = outputs.entity_last_hidden_state # Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model >>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred") >>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles" >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> predicted_class_idx = int(logits[0].argmax()) >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` ## Resources - [A demo notebook on how to fine-tune [`LukeForEntityPairClassification`] for relation classification](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE) - [Notebooks showcasing how you to reproduce the results as reported in the paper with the HuggingFace implementation of LUKE](https://github.com/studio-ousia/luke/tree/master/notebooks) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## LukeConfig [[autodoc]] LukeConfig ## LukeTokenizer [[autodoc]] LukeTokenizer - __call__ - save_vocabulary ## LukeModel [[autodoc]] LukeModel - forward ## LukeForMaskedLM [[autodoc]] LukeForMaskedLM - forward ## LukeForEntityClassification [[autodoc]] LukeForEntityClassification - forward ## LukeForEntityPairClassification [[autodoc]] LukeForEntityPairClassification - forward ## LukeForEntitySpanClassification [[autodoc]] LukeForEntitySpanClassification - forward ## LukeForSequenceClassification [[autodoc]] LukeForSequenceClassification - forward ## LukeForMultipleChoice [[autodoc]] LukeForMultipleChoice - forward ## LukeForTokenClassification [[autodoc]] LukeForTokenClassification - forward ## LukeForQuestionAnswering [[autodoc]] LukeForQuestionAnswering - forward
transformers/docs/source/en/model_doc/luke.md/0
{ "file_path": "transformers/docs/source/en/model_doc/luke.md", "repo_id": "transformers", "token_count": 2616 }
347
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on {release_date} and added to Hugging Face Transformers on 2025-08-20.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # MetaCLIP 2 ## Overview MetaCLIP 2 is a replication of the original CLIP model trained on 300+ languages. It achieves state-of-the-art (SOTA) results on multilingual benchmarks (e.g., XM3600, CVQA, Babel‑ImageNet), surpassing previous SOTA such as [mSigLIP](siglip) and [SigLIP‑2](siglip2). The authors show that English and non-English worlds can mutually benefit and elevate each other. This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/MetaCLIP). You can find all the MetaCLIP 2 checkpoints under the [Meta](https://huggingface.co/facebook?search_models=metaclip-2) organization. > [!TIP] > Click on the MetaCLIP 2 models in the right sidebar for more examples of how to apply MetaCLIP 2 to different image and language tasks. The example below demonstrates how to calculate similarity scores between multiple text descriptions and an image with [`Pipeline`] or the [`AutoModel`] class. Usage of the MetaCLIP 2 models is identical to the CLIP models, you just need the `MetaClip2Model` class instead of `CLIPModel`. 
<hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline clip = pipeline( task="zero-shot-image-classification", model="facebook/metaclip-2-worldwide-huge-quickgelu", dtype=torch.bfloat16, device=0 ) labels = ["a photo of a cat", "a photo of a dog", "a photo of a car"] clip("http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=labels) ``` </hfoption> <hfoption id="AutoModel"> ```py import requests import torch from PIL import Image from transformers import AutoProcessor, AutoModel model = AutoModel.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu", dtype=torch.bfloat16, attn_implementation="sdpa") processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) labels = ["a photo of a cat", "a photo of a dog", "a photo of a car"] inputs = processor(text=labels, images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image probs = logits_per_image.softmax(dim=1) most_likely_idx = probs.argmax(dim=1).item() most_likely_label = labels[most_likely_idx] print(f"Most likely label: {most_likely_label} with probability: {probs[0][most_likely_idx].item():.3f}") ``` </hfoption> </hfoptions> ## MetaClip2Config [[autodoc]] MetaClip2Config - from_text_vision_configs ## MetaClip2TextConfig [[autodoc]] MetaClip2TextConfig ## MetaClip2VisionConfig [[autodoc]] MetaClip2VisionConfig ## MetaClip2Model [[autodoc]] MetaClip2Model - forward - get_text_features - get_image_features ## MetaClip2TextModel [[autodoc]] MetaClip2TextModel - forward ## MetaClip2TextModelWithProjection [[autodoc]] MetaClip2TextModelWithProjection - forward ## MetaClip2VisionModelWithProjection [[autodoc]] MetaClip2VisionModelWithProjection - forward ## MetaClip2VisionModel [[autodoc]] MetaClip2VisionModel - forward ## MetaClip2ForImageClassification [[autodoc]] MetaClip2ForImageClassification - forward
transformers/docs/source/en/model_doc/metaclip_2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/metaclip_2.md", "repo_id": "transformers", "token_count": 1535 }
348
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2022-07-11 and added to Hugging Face Transformers on 2023-03-27.* # NLLB-MOE <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The NLLB model was presented in [No Language Left Behind: Scaling Human-Centered Machine Translation](https://huggingface.co/papers/2207.04672) by Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. The abstract of the paper is the following: *Driven by the goal of eradicating language barriers on a global scale, machine translation has solidified itself as a key focus of artificial intelligence research today. However, such efforts have coalesced around a small subset of languages, leaving behind the vast majority of mostly low-resource languages. What does it take to break the 200 language barrier while ensuring safe, high quality results, all while keeping ethical considerations in mind? In No Language Left Behind, we took on this challenge by first contextualizing the need for low-resource language translation support through exploratory interviews with native speakers. Then, we created datasets and models aimed at narrowing the performance gap between low and high-resource languages. More specifically, we developed a conditional compute model based on Sparsely Gated Mixture of Experts that is trained on data obtained with novel and effective data mining techniques tailored for low-resource languages. We propose multiple architectural and training improvements to counteract overfitting while training on thousands of tasks. Critically, we evaluated the performance of over 40,000 different translation directions using a human-translated benchmark, Flores-200, and combined human evaluation with a novel toxicity benchmark covering all languages in Flores-200 to assess translation safety. Our model achieves an improvement of 44% BLEU relative to the previous state-of-the-art, laying important groundwork towards realizing a universal translation system.* This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/facebookresearch/fairseq). 
## Usage tips - M2M100ForConditionalGeneration is the base model for both NLLB and NLLB MoE - The NLLB-MoE is very similar to the NLLB model, but it's feed forward layer is based on the implementation of SwitchTransformers. - The tokenizer is the same as the NLLB models. ## Implementation differences with SwitchTransformers The biggest difference is the way the tokens are routed. NLLB-MoE uses a `top-2-gate` which means that for each input, only the top two experts are selected based on the highest predicted probabilities from the gating network, and the remaining experts are ignored. In `SwitchTransformers`, only the top-1 probabilities are computed, which means that tokens have less probability of being forwarded. Moreover, if a token is not routed to any expert, `SwitchTransformers` still adds its unmodified hidden states (kind of like a residual connection) while they are masked in `NLLB`'s top-2 routing mechanism. ## Generating with NLLB-MoE The available checkpoints require around 350GB of storage. Make sure to use `accelerate` if you do not have enough RAM on your machine. While generating the target text set the `forced_bos_token_id` to the target language id. The following example shows how to translate English to French using the *facebook/nllb-200-distilled-600M* model. Note that we're using the BCP-47 code for French `fra_Latn`. See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) for the list of all BCP-47 in the Flores 200 dataset. ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b") >>> article = "Previously, Ring's CEO, Jamie Siminoff, remarked the company started when his doorbell wasn't audible from his shop in his garage." >>> inputs = tokenizer(article, return_tensors="pt") >>> translated_tokens = model.generate( ... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"], max_length=50 ... ) >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] "Auparavant, le PDG de Ring, Jamie Siminoff, a fait remarquer que la société avait commencé lorsque sa sonnette n'était pas audible depuis son magasin dans son garage." ``` ### Generating from any other language than English English (`eng_Latn`) is set as the default language from which to translate. In order to specify that you'd like to translate from a different language, you should specify the BCP-47 code in the `src_lang` keyword argument of the tokenizer initialization. See example below for a translation from romanian to german: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b", src_lang="ron_Latn") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b") >>> article = "Şeful ONU spune că nu există o soluţie militară în Siria" >>> inputs = tokenizer(article, return_tensors="pt") >>> translated_tokens = model.generate( ... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["deu_Latn"], max_length=30 ... 
) >>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0] ``` ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## NllbMoeConfig [[autodoc]] NllbMoeConfig ## NllbMoeTop2Router [[autodoc]] NllbMoeTop2Router - route_tokens - forward ## NllbMoeSparseMLP [[autodoc]] NllbMoeSparseMLP - forward ## NllbMoeModel [[autodoc]] NllbMoeModel - forward ## NllbMoeForConditionalGeneration [[autodoc]] NllbMoeForConditionalGeneration - forward
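As a rough sketch of the `accelerate` tip above, loading with `device_map="auto"` shards and offloads the checkpoint across available GPUs and CPU RAM instead of materializing it all at once (the exact placement depends on your hardware):

```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")
model = AutoModelForSeq2SeqLM.from_pretrained(
    "facebook/nllb-moe-54b",
    device_map="auto",  # requires `accelerate`
    dtype=torch.float16,
)
```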
transformers/docs/source/en/model_doc/nllb-moe.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nllb-moe.md", "repo_id": "transformers", "token_count": 2095 }
349
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> *This model was released on 2022-11-02 and added to Hugging Face Transformers on 2023-08-21.* # Pop2Piano <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The Pop2Piano model was proposed in [Pop2Piano : Pop Audio-based Piano Cover Generation](https://huggingface.co/papers/2211.00895) by Jongho Choi and Kyogu Lee. Piano covers of pop music are widely enjoyed, but generating them from music is not a trivial task. It requires great expertise with playing piano as well as knowing different characteristics and melodies of a song. With Pop2Piano you can directly generate a cover from a song's audio waveform. It is the first model to directly generate a piano cover from pop audio without melody and chord extraction modules. Pop2Piano is an encoder-decoder Transformer model based on [T5](https://huggingface.co/papers/1910.10683). The input audio is transformed to its waveform and passed to the encoder, which transforms it to a latent representation. The decoder uses these latent representations to generate token ids in an autoregressive way. Each token id corresponds to one of four different token types: time, velocity, note and 'special'. The token ids are then decoded to their equivalent MIDI file. The abstract from the paper is the following: *Piano covers of pop music are enjoyed by many people. However, the task of automatically generating piano covers of pop music is still understudied. This is partly due to the lack of synchronized {Pop, Piano Cover} data pairs, which made it challenging to apply the latest data-intensive deep learning-based methods. To leverage the power of the data-driven approach, we make a large amount of paired and synchronized {Pop, Piano Cover} data using an automated pipeline. In this paper, we present Pop2Piano, a Transformer network that generates piano covers given waveforms of pop music. To the best of our knowledge, this is the first model to generate a piano cover directly from pop audio without using melody and chord extraction modules. We show that Pop2Piano, trained with our dataset, is capable of producing plausible piano covers.* This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/sweetcocoa/pop2piano). ## Usage tips * To use Pop2Piano, you will need to install the 🤗 Transformers library, as well as the following third party modules: ```bash pip install pretty-midi==0.2.9 essentia==2.1b6.dev1034 librosa scipy ``` Please note that you may need to restart your runtime after installation. * Pop2Piano is an Encoder-Decoder based model like T5. * Pop2Piano can be used to generate midi-audio files for a given audio sequence. * Choosing different composers in `Pop2PianoForConditionalGeneration.generate()` can lead to variety of different results. 
* Setting the sampling rate to 44.1 kHz when loading the audio file can give good performance. * Though Pop2Piano was mainly trained on Korean Pop music, it also does pretty well on other Western Pop or Hip Hop songs. ## Examples - Example using HuggingFace Dataset: ```python >>> from datasets import load_dataset >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> ds = load_dataset("sweetcocoa/pop2piano_ci", split="test") >>> inputs = processor( ... audio=ds["audio"][0]["array"], sampling_rate=ds["audio"][0]["sampling_rate"], return_tensors="pt" ... ) >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example using your own audio file: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> audio, sr = librosa.load("<your_audio_file_here>", sr=44100) # feel free to change the sr to a suitable value. >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt") >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example of processing multiple audio files in batch: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> # feel free to change the sr to a suitable value. >>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=[audio1, audio2], sampling_rate=[sr1, sr2], return_attention_mask=True, return_tensors="pt") >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` - Example of processing multiple audio files in batch (Using `Pop2PianoFeatureExtractor` and `Pop2PianoTokenizer`): ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoFeatureExtractor, Pop2PianoTokenizer >>> # feel free to change the sr to a suitable value. 
>>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") >>> inputs = feature_extractor( ... audio=[audio1, audio2], ... sampling_rate=[sr1, sr2], ... return_attention_mask=True, ... return_tensors="pt", ... ) >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = tokenizer.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` ## Pop2PianoConfig [[autodoc]] Pop2PianoConfig ## Pop2PianoFeatureExtractor [[autodoc]] Pop2PianoFeatureExtractor - __call__ ## Pop2PianoForConditionalGeneration [[autodoc]] Pop2PianoForConditionalGeneration - forward - generate ## Pop2PianoTokenizer [[autodoc]] Pop2PianoTokenizer - __call__ ## Pop2PianoProcessor [[autodoc]] Pop2PianoProcessor - __call__
transformers/docs/source/en/model_doc/pop2piano.md/0
{ "file_path": "transformers/docs/source/en/model_doc/pop2piano.md", "repo_id": "transformers", "token_count": 2622 }
350
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-01-19 and added to Hugging Face Transformers on 2021-10-26.* # UniSpeech <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The UniSpeech model was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://huggingface.co/papers/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang . The abstract from the paper is the following: *In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both unlabeled and labeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 17.8% relative phone error rate reductions respectively (averaged over all testing languages). The transferability of UniSpeech is also demonstrated on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech). ## Usage tips - UniSpeech is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - UniSpeech model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. > [!NOTE] > The `head_mask` argument is ignored when using all attention implementation other than "eager". 
If you have a `head_mask` and want it to have effect, load the model with `UniSpeechModel.from_pretrained(model_id, attn_implementation="eager")`. ## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## UniSpeechConfig [[autodoc]] UniSpeechConfig ## UniSpeech specific outputs [[autodoc]] models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput ## UniSpeechModel [[autodoc]] UniSpeechModel - forward ## UniSpeechForCTC [[autodoc]] UniSpeechForCTC - forward ## UniSpeechForSequenceClassification [[autodoc]] UniSpeechForSequenceClassification - forward ## UniSpeechForPreTraining [[autodoc]] UniSpeechForPreTraining - forward
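The usage tips above can be put together into a minimal speech recognition sketch. This is illustrative only: the checkpoint id is a placeholder for any CTC-fine-tuned UniSpeech checkpoint, and the dummy waveform should be replaced with real speech sampled at 16kHz.

```py
# Minimal sketch, assuming a CTC-fine-tuned UniSpeech checkpoint (the id below is a placeholder)
import numpy as np
import torch
from transformers import UniSpeechForCTC, Wav2Vec2Processor

checkpoint = "<path-or-id-of-a-ctc-finetuned-unispeech-checkpoint>"  # hypothetical placeholder
processor = Wav2Vec2Processor.from_pretrained(checkpoint)
model = UniSpeechForCTC.from_pretrained(checkpoint)

# replace this dummy 1-second silent waveform with a real float32 waveform at 16kHz
waveform = np.zeros(16000, dtype=np.float32)

inputs = processor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# CTC decoding: pick the most likely token per frame, then collapse repeats and blanks
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```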
transformers/docs/source/en/model_doc/unispeech.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech.md", "repo_id": "transformers", "token_count": 1122 }
351
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-05-02 and added to Hugging Face Transformers on 2022-01-29.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # XLM-RoBERTa-XL [XLM-RoBERTa-XL](https://huggingface.co/papers/2105.00572) is a 3.5B parameter multilingual masked language model pretrained on 100 languages. It shows that by scaling model capacity, multilingual models demonstrates strong performance on high-resource languages and can even zero-shot low-resource languages. You can find all the original XLM-RoBERTa-XL checkpoints under the [AI at Meta](https://huggingface.co/facebook?search_models=xlm) organization. > [!TIP] > Click on the XLM-RoBERTa-XL models in the right sidebar for more examples of how to apply XLM-RoBERTa-XL to different cross-lingual tasks like classification, translation, and question answering. The example below demonstrates how to predict the `<mask>` token with [`Pipeline`], [`AutoModel`], and from the command line. <hfoptions id="usage"> <hfoption id="Pipeline"> ```python import torch from transformers import pipeline pipeline = pipeline( task="fill-mask", model="facebook/xlm-roberta-xl", dtype=torch.float16, device=0 ) pipeline("Bonjour, je suis un modèle <mask>.") ``` </hfoption> <hfoption id="AutoModel"> ```python import torch from transformers import AutoModelForMaskedLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( "facebook/xlm-roberta-xl", ) model = AutoModelForMaskedLM.from_pretrained( "facebook/xlm-roberta-xl", dtype=torch.float16, device_map="auto", attn_implementation="sdpa" ) inputs = tokenizer("Bonjour, je suis un modèle <mask>.", return_tensors="pt").to(model.device) with torch.no_grad(): outputs = model(**inputs) predictions = outputs.logits masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1] predicted_token_id = predictions[0, masked_index].argmax(dim=-1) predicted_token = tokenizer.decode(predicted_token_id) print(f"The predicted token is: {predicted_token}") ``` </hfoption> <hfoption id="transformers CLI"> ```bash echo -e "Plants create <mask> through a process known as photosynthesis." | transformers-cli run --task fill-mask --model facebook/xlm-roberta-xl --device 0 ``` </hfoption> </hfoptions> Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4. 
```py import torch from transformers import AutoModelForMaskedLM, AutoTokenizer, TorchAoConfig quantization_config = TorchAoConfig("int4_weight_only", group_size=128) tokenizer = AutoTokenizer.from_pretrained( "facebook/xlm-roberta-xl", ) model = AutoModelForMaskedLM.from_pretrained( "facebook/xlm-roberta-xl", dtype=torch.float16, device_map="auto", attn_implementation="sdpa", quantization_config=quantization_config ) inputs = tokenizer("Bonjour, je suis un modèle <mask>.", return_tensors="pt").to(model.device) with torch.no_grad(): outputs = model(**inputs) predictions = outputs.logits masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1] predicted_token_id = predictions[0, masked_index].argmax(dim=-1) predicted_token = tokenizer.decode(predicted_token_id) print(f"The predicted token is: {predicted_token}") ``` ## Notes - Unlike some XLM models, XLM-RoBERTa-XL doesn't require `lang` tensors to understand which language is used. It automatically determines the language from the input ids. ## XLMRobertaXLConfig [[autodoc]] XLMRobertaXLConfig ## XLMRobertaXLModel [[autodoc]] XLMRobertaXLModel - forward ## XLMRobertaXLForCausalLM [[autodoc]] XLMRobertaXLForCausalLM - forward ## XLMRobertaXLForMaskedLM [[autodoc]] XLMRobertaXLForMaskedLM - forward ## XLMRobertaXLForSequenceClassification [[autodoc]] XLMRobertaXLForSequenceClassification - forward ## XLMRobertaXLForMultipleChoice [[autodoc]] XLMRobertaXLForMultipleChoice - forward ## XLMRobertaXLForTokenClassification [[autodoc]] XLMRobertaXLForTokenClassification - forward ## XLMRobertaXLForQuestionAnswering [[autodoc]] XLMRobertaXLForQuestionAnswering - forward
transformers/docs/source/en/model_doc/xlm-roberta-xl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-roberta-xl.md", "repo_id": "transformers", "token_count": 1850 }
352
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Loading models Transformers provides many pretrained models that are ready to use with a single line of code. It requires a model class and the [`~PreTrainedModel.from_pretrained`] method. Call [`~PreTrainedModel.from_pretrained`] to download and load a model's weights and configuration stored on the Hugging Face [Hub](https://hf.co/models). > [!TIP] > The [`~PreTrainedModel.from_pretrained`] method loads weights stored in the [safetensors](https://hf.co/docs/safetensors/index) file format if they're available. Traditionally, PyTorch model weights are serialized with the [pickle](https://docs.python.org/3/library/pickle.html) utility which is known to be unsecure. Safetensor files are more secure and faster to load. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", dtype="auto", device_map="auto") ``` This guide explains how models are loaded, the different ways you can load a model, how to overcome memory issues for really big models, and how to load custom models. ## Models and configurations All models have a `configuration.py` file with specific attributes like the number of hidden layers, vocabulary size, activation function, and more. You'll also find a `modeling.py` file that defines the layers and mathematical operations taking place inside each layer. The `modeling.py` file takes the model attributes in `configuration.py` and builds the model accordingly. At this point, you have a model with random weights that needs to be trained to output meaningful results. <!-- insert diagram of model and configuration --> > [!TIP] > An *architecture* refers to the model's skeleton and a *checkpoint* refers to the model's weights for a given architecture. For example, [BERT](./model_doc/bert) is an architecture while [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) is a checkpoint. You'll see the term *model* used interchangeably with architecture and checkpoint. There are two general types of models you can load: 1. A barebones model, like [`AutoModel`] or [`LlamaModel`], that outputs hidden states. 2. A model with a specific *head* attached, like [`AutoModelForCausalLM`] or [`LlamaForCausalLM`], for performing specific tasks. For each model type, there is a separate class for each machine learning framework (PyTorch, TensorFlow, Flax). Pick the corresponding prefix for the framework you're using. 
<hfoptions id="backend"> <hfoption id="PyTorch"> ```py from transformers import AutoModelForCausalLM, MistralForCausalLM # load with AutoClass or model-specific class model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", dtype="auto", device_map="auto") model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", dtype="auto", device_map="auto") ``` </hfoption> <hfoption id="TensorFlow"> ```py from transformers import TFAutoModelForCausalLM, TFMistralForCausalLM # load with AutoClass or model-specific class model = TFAutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") model = TFMistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") ``` </hfoption> <hfoption id="Flax"> ```py from transformers import FlaxAutoModelForCausalLM, FlaxMistralForCausalLM # load with AutoClass or model-specific class model = FlaxAutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") model = FlaxMistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") ``` </hfoption> </hfoptions> ## Model classes To get a pretrained model, you need to load the weights into the model. This is done by calling [`~PreTrainedModel.from_pretrained`] which accepts weights from the Hugging Face Hub or a local directory. There are two model classes, the [AutoModel](./model_doc/auto) class and a model-specific class. <hfoptions id="model-classes"> <hfoption id="AutoModel"> <Youtube id="AhChOFRegn4"/> The [AutoModel](./model_doc/auto) class is a convenient way to load an architecture without needing to know the exact model class name because there are many models available. It automatically selects the correct model class based on the configuration file. You only need to know the task and checkpoint you want to use. Easily switch between models or tasks, as long as the architecture is supported for a given task. For example, the same model can be used for separate tasks. ```py from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoModelForQuestionAnswering # use the same API for 3 different tasks model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") model = AutoModelForSequenceClassification.from_pretrained("meta-llama/Llama-2-7b-hf") model = AutoModelForQuestionAnswering.from_pretrained("meta-llama/Llama-2-7b-hf") ``` In other cases, you may want to quickly try out several different models for a task. ```py from transformers import AutoModelForCausalLM # use the same API to load 3 different models model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b") ``` </hfoption> <hfoption id="model-specific class"> The [AutoModel](./model_doc/auto) class builds on top of model-specific classes. All model classes that support a specific task are mapped to their respective `AutoModelFor` task class. If you already know which model class you want to use, then you could use its model-specific class directly. ```py from transformers import LlamaModel, LlamaForCausalLM model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") ``` </hfoption> </hfoptions> ## Large models Large pretrained models require a lot of memory to load. The loading process involves: 1. creating a model with random weights 2. loading the pretrained weights 3. 
placing the pretrained weights on the model You need enough memory to hold two copies of the model weights (random and pretrained) which may not be possible depending on your hardware. In distributed training environments, this is even more challenging because each process loads a pretrained model. Transformers reduces some of these memory-related challenges with fast initialization, sharded checkpoints, Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature, and supporting lower bit data types. ### Sharded checkpoints The [`~PreTrainedModel.save_pretrained`] method automatically shards checkpoints larger than 10GB. Each shard is loaded sequentially after the previous shard is loaded, limiting memory usage to only the model size and the largest shard size. The `max_shard_size` parameter defaults to 5GB for each shard because it is easier to run on free-tier GPU instances without running out of memory. For example, create some shards checkpoints for [BioMistral/BioMistral-7B](https://hf.co/BioMistral/BioMistral-7B) in [`~PreTrainedModel.save_pretrained`]. ```py from transformers import AutoModel import tempfile import os model = AutoModel.from_pretrained("biomistral/biomistral-7b") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, max_shard_size="5GB") print(sorted(os.listdir(tmp_dir))) ``` Reload the sharded checkpoint with [`~PreTrainedModel.from_pretrained`]. ```py with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = AutoModel.from_pretrained(tmp_dir) ``` Sharded checkpoints can also be directly loaded with [`~transformers.modeling_utils.load_sharded_checkpoint`]. ```py from transformers.modeling_utils import load_sharded_checkpoint with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, max_shard_size="5GB") load_sharded_checkpoint(model, tmp_dir) ``` The [`~PreTrainedModel.save_pretrained`] method creates an index file that maps parameter names to the files they're stored in. The index file has two keys, `metadata` and `weight_map`. ```py import json with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, max_shard_size="5GB") with open(os.path.join(tmp_dir, "model.safetensors.index.json"), "r") as f: index = json.load(f) print(index.keys()) ``` The `metadata` key provides the total model size. ```py index["metadata"] {'total_size': 28966928384} ``` The `weight_map` key maps each parameter to the shard it's stored in. ```py index["weight_map"] {'lm_head.weight': 'model-00006-of-00006.safetensors', 'model.embed_tokens.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.input_layernorm.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.mlp.down_proj.weight': 'model-00001-of-00006.safetensors', ... } ``` ### Big Model Inference > [!TIP] > Make sure you have Accelerate v0.9.0 and PyTorch v1.9.0 or later installed to use this feature! <Youtube id="MWCSGj9jEAo"/> [`~PreTrainedModel.from_pretrained`] is supercharged with Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature. Big Model Inference creates a *model skeleton* on the PyTorch [meta](https://pytorch.org/docs/main/meta.html) device. The meta device doesn't store any real data, only the metadata. Randomly initialized weights are only created when the pretrained weights are loaded to avoid maintaining two copies of the model in memory at the same time. The maximum memory usage is only the size of the model. 
> [!TIP] > Learn more about device placement in [Designing a device map](https://hf.co/docs/accelerate/v0.33.0/en/concept_guides/big_model_inference#designing-a-device-map). Big Model Inference's second feature relates to how weights are loaded and dispatched in the model skeleton. Model weights are dispatched across all available devices, starting with the fastest device (usually the GPU) and then offloading any remaining weights to slower devices (CPU and hard drive). Both features combined reduces memory usage and loading times for big pretrained models. Set [device_map](https://github.com/huggingface/transformers/blob/026a173a64372e9602a16523b8fae9de4b0ff428/src/transformers/modeling_utils.py#L3061) to `"auto"` to enable Big Model Inference. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto") ``` You can also manually assign layers to a device in `device_map`. It should map all model parameters to a device, but you don't have to detail where all the submodules of a layer go if the entire layer is on the same device. Access the `hf_device_map` attribute to see how a model is distributed across devices. ```py device_map = {"model.layers.1": 0, "model.layers.14": 1, "model.layers.31": "cpu", "lm_head": "disk"} model.hf_device_map ``` ### Model data type PyTorch model weights are initialized in `torch.float32` by default. Loading a model in a different data type, like `torch.float16`, requires additional memory because the model is loaded again in the desired data type. Explicitly set the [dtype](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) parameter to directly initialize the model in the desired data type instead of loading the weights twice (`torch.float32` then `torch.float16`). You could also set `dtype="auto"` to automatically load the weights in the data type they are stored in. <hfoptions id="dtype"> <hfoption id="specific dtype"> ```py import torch from transformers import AutoModelForCausalLM gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", dtype=torch.float16) ``` </hfoption> <hfoption id="auto dtype"> ```py from transformers import AutoModelForCausalLM gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", dtype="auto") ``` </hfoption> </hfoptions> The `dtype` parameter can also be configured in [`AutoConfig`] for models instantiated from scratch. ```py import torch from transformers import AutoConfig, AutoModel my_config = AutoConfig.from_pretrained("google/gemma-2b", dtype=torch.float16) model = AutoModel.from_config(my_config) ``` ## Custom models Custom models builds on Transformers' configuration and modeling classes, supports the [AutoClass](#autoclass) API, and are loaded with [`~PreTrainedModel.from_pretrained`]. The difference is that the modeling code is *not* from Transformers. Take extra precaution when loading a custom model. While the Hub includes [malware scanning](https://hf.co/docs/hub/security-malware#malware-scanning) for every repository, you should still be careful to avoid inadvertently executing malicious code. Set `trust_remote_code=True` in [`~PreTrainedModel.from_pretrained`] to load a custom model. ```py from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True) ``` As an extra layer of security, load a custom model from a specific revision to avoid loading model code that may have changed. 
The commit hash can be copied from the models [commit history](https://hf.co/sgugger/custom-resnet50d/commits/main). ```py commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash ) ``` Refer to the [Customize models](./custom_models) guide for more information.
transformers/docs/source/en/models.md/0
{ "file_path": "transformers/docs/source/en/models.md", "repo_id": "transformers", "token_count": 4285 }
353
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPU GPUs are commonly used to train deep learning models due to their high memory bandwidth and parallel processing capabilities. Depending on your GPU and model size, it is possible to even train models with billions of parameters. The key is to find the right balance between GPU memory utilization (data throughput/training time) and training speed. This guide will show you the features available in Transformers and PyTorch for efficiently training a model on GPUs. In many cases, you'll want to use a combination of these features to optimize training. Refer to the table below to quickly help you identify the features relevant to your training scenario. | Feature | Training speed | Memory usage | |---|---|---| | batch size | yes | yes | | gradient accumulation | no | yes | | gradient checkpointing | no | yes | | mixed precision | yes | depends | | optimizers | yes | yes | | data preloading | yes | no | | torch_empty_cache_steps | no | yes | | torch.compile | yes | no | | scaled dot product attention (SDPA) | yes | yes | ## Trainer [Trainer](./trainer) supports many useful training features that can be configured through [`TrainingArguments`]. This section highlights some of the more important features for optimizing training. ### Batch size Batch size is one of the most important hyperparameters for efficient GPU training because it affects memory usage and training speed. Larger batch sizes lead to faster training because they take advantage of a GPU's parallel processing power. It is recommended to use batch sizes that are powers of 2, such as 8, 64, 128, 256, 512, etc. The batch size depends on your GPU and the model's data type. Configure [`~TrainingArguments.per_device_train_batch_size`] in [`TrainingArguments`]. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=256, per_device_eval_batch_size=256, ) ``` Refer to the NVIDIA [Performance](https://docs.nvidia.com/deeplearning/performance/dl-performance-fully-connected/index.html#input-features) guide to learn more about how input features, output neuron counts, and batch size affect performance. These are involved in the General Matrix Multiplications (GEMMs) performed by the GPU. Larger parameters are better for parallelization and efficiency. The [Tensor Core Requirements](https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc) section is also useful for selecting a batch size that maximizes the speed of tensor multiplication based on the data type and GPU. For example, multiples of 8 are recommended for fp16, unless it's an A100 GPU, in which case use multiples of 64. Finally, consider [Dimension Quantization Effects](https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#dim-quantization) for smaller parameters. 
Tile quantization results when matrix dimensions aren't divisible by a GPU's thread block tile size, causing the GPU to underutilize its resources. Selecting the correct batch size multiplier, such that the matrix is divisible by the tile size, can significantly speed up training. ### Gradient accumulation Gradient accumulation overcomes memory constraints - useful for fitting a very large model that otherwise wouldn't fit on a single GPU - by accumulating gradients over multiple mini-batches before updating the parameters. This reduces memory by storing fewer gradients and enables training with a larger *effective batch size* because usually, the parameters are updated from a single batch of data. Training can slow down though due to the additional forward and backward passes introduced by gradient accumulation. Configure [`~TrainingArguments.gradient_accumulation_steps`] in [`TrainingArguments`] to enable gradient accumulation. ```py from transformers import TrainingArguments # effective batch size of 64 args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, ) ``` Try to avoid too many gradient accumulation steps because they can really slow down training. Consider the example below, where the maximum batch size that'll fit on your GPU is 4. You should keep your batch size at 4 to better utilize the GPU. | batch size | gradient accumulation steps | effective batch size | | |---|---|---|---| | 1 | 64 | 64 | 👎 | | 4 | 16 | 64 | 👍 | ### Gradient checkpointing Gradient checkpointing reduces memory usage by only storing some of the intermediate activations during the backward pass and recomputing the remaining activations. This avoids storing *all* of the intermediate activations from the forward pass, which can require a lot of memory overhead. However, it comes at the cost of slower training speed (~20%). Configure [`~TrainingArguments.gradient_checkpointing`] in [`TrainingArguments`] to enable gradient checkpointing. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, ) ``` ### Mixed precision Mixed precision accelerates training speed by performing some calculations in half-precision (fp16) and some in full-precision (fp32). The half-precision calculations boost training speed because they're not as computationally expensive as performing the calculations in full-precision. Meanwhile, preserving some of the calculations in full-precision maintains accuracy. There are several data types available for mixed precision training. <hfoptions id="mixed-precision"> <hfoption id="fp16"> The main advantage of mixed precision training is saving the activations in fp16. Configure [`~TrainingArguments.fp16`] in [`TrainingArguments`] to enable mixed precision training with the fp16 data type. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, fp16=True, ) ``` fp16 isn't memory-optimized because the gradients that are computed in fp16 are converted back to fp32 during the optimization step. You may end up using more GPU memory, especially for small batch sizes, because there are now two versions (fp16 and fp32) of the model on the GPU. 
</hfoption> <hfoption id="bf16"> [bf16](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus) trades off some precision for a much larger dynamic range, which is helpful for avoiding overflow and underflow errors. You can use bf16 without adding any loss scaling methods like you would with fp16. bf16 is supported by NVIDIAs Ampere architecture or newer. Configure [`~TrainingArguments.bf16`] in [`TrainingArguments`] to enable mixed precision training with the bf16 data type. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True, ) ``` </hfoption> <hfoption id="tf32"> [tf32](https://blogs.nvidia.com/blog/tensorfloat-32-precision-format/) is a mode on NVIDIA Ampere GPUs that convert the convolution and matrix multiplication inputs to tf32. All other storage and operations are kept in fp32. This allows tf32 to maintain the same range as fp32, the same precision as fp16 and more precision than bf16. Combining tf32 with fp16 or bf16 mixed precision training can improve throughput by 16x. tf32 is enabled by default on NVIDIA Ampere GPUs, but you can also add the code below to your fp32 training or inference code to explicitly enable it. ```py import torch torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True ``` Configure [tf32()](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.tf32) in [`TrainingArguments`] to enable mixed precision training with tf32 mode. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True. tf32=True, ) ``` </hfoption> </hfoptions> ### Optimizers Transformers implements the [AdamW (adamw_torch)](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) optimizer from PyTorch by default. But because it stores a weighted average of past gradients, it requires additional memory proportional to the number of model parameters to store the past gradients. This can be an issue when training very large models, and in such cases, you should consider choosing a different optimizer. For example, if you have [Apex](https://nvidia.github.io/apex/index.html) installed on either [NVIDIA](https://github.com/NVIDIA/apex) or [AMD](https://github.com/ROCm/apex), then using the `adamw_apex_fused` optimizer provides the fastest training for all AdamW optimizers. Configure [`~TrainingArguments.optim`] in [`TrainingArguments`] to choose an optimizer. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True, optim="adamw_bnb_8bit" ) ``` There are many optimizers to choose from (refer to [OptimizerNames](https://github.com/huggingface/transformers/blob/34f4080ff59b1668d919a1ba9f8bc4a3a2a3f478/src/transformers/training_args.py#L145) for a full supported list) depending on your training scenario. For example, Adafactor can significantly reduce memory requirements by storing a weighted average of a row or column instead of each element in the matrix at the cost of slower convergence. Another example is using a [8-bit AdamW optimizer](https://huggingface.co/docs/bitsandbytes) from bitsandbytes to quantize optimizer states. 
The optimizer state is stored in a lower precision and dequantized before being used in the optimizer step. Refer to the [optimizer](./optimizers) guide to learn about more specialized optimizers. ### Data preloading Data preloading loads and prepares batches of data in advance on the CPU to ensure the GPU is continuously working, reducing GPU idling and increasing utilization. There are two ways to preload data to ensure the GPU is always working. 1. Allocate pinned memory on the CPU to store the data and transfer it directly to the GPU. 2. Increase the number of CPU threads or workers to preload the data faster. Configure [`~TrainingArguments.dataloader_pin_memory`] and [`~TrainingArguments.dataloader_num_workers`] in [`TrainingArguments`] to allocate pinned memory and increase the number of workers. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True, optim="adamw_bnb_8bit", dataloader_pin_memory=True, dataloader_num_workers=4, ) ``` ## PyTorch PyTorch provides several features for reducing memory requirements and increasing training speed. These features can often be enabled in Transformers by only adding a few lines of code. ### torch_empty_cache_steps The [torch.cuda.empty_cache](https://pytorch.org/docs/stable/generated/torch.cuda.empty_cache.html#torch.cuda.empty_cache) function releases unused cached memory, which can help avoid out-of-memory (OOM) errors at the cost of ~10% slower training. Use [torch_empty_cache_steps()](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.torch_empty_cache_steps) in [`TrainingArguments`] to enable it after a certain number of training steps. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True, optim="adamw_bnb_8bit", dataloader_pin_memory=True, dataloader_num_workers=4, torch_empty_cache_steps=4, ) ``` ### torch.compile [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) compiles PyTorch code into optimized kernels that significantly speed up training. This feature relies on TorchDynamo to capture PyTorch graphs with the Frame Evaluation API. The graph can be further compiled into optimized kernels for different backends. Configure [`~TrainingArguments.torch_compile`] in [`TrainingArguments`] to enable it, and configure [torch_compile_backend()](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.torch_compile_backend) to select a backend to use. ```py from transformers import TrainingArguments args = TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=16, gradient_checkpointing=True, bf16=True, optim="adamw_bnb_8bit", dataloader_pin_memory=True, dataloader_num_workers=4, torch_empty_cache_steps=4, torch_compile=True, torch_compile_backend="inductor" ) ``` Refer to the table below to help you choose the right backend for your training scenario. 
| backend | description | goal | |---|---|---| | eager | uses PyTorch to run extracted GraphModule | debugging | | aot_eager | uses PyTorch eager mode for AOTAutograd's extracted forward and backward graphs | debugging | | inductor | uses TorchInductor with AOTAutograd and CUDA Graphs by leveraging Triton kernels | training and inference | | nvfuser | uses nvFuser with TorchScript | training and inference | | aot_nvfuser | uses nvFuser with AOTAutograd | training and inference | | aot_cudagraphs | uses CUDA Graphs with AOTAutograd | training and inference | | ofi | uses TorchScript's [optimize_for_inference](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html#torch-jit-optimize-for-inference) | inference | | fx2trt | uses [Torch-TensorRT](https://pytorch.org/TensorRT/tutorials/getting_started_with_fx_path.html) | inference | | onnxrt | uses [ONNX-RT](https://onnxruntime.ai/) for CPU and GPU inference | inference | | ipex | uses [IPEX](https://github.com/intel/intel-extension-for-pytorch) for CPU inference | inference | ### Scaled dot product attention [torch.nn.functional.scaled_dot_product_attention](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) (SDPA) is a native PyTorch implementation of the scaled dot product attention mechanism. SDPA is more efficient and optimized than the original attention mechanism in transformer models. It supports three types of scaled dot product attention. - [FlashAttention2](https://github.com/Dao-AILab/flash-attention) is automatically enabled for models with the fp16 or bf16 torch type. Make sure to cast your model to the appropriate type first. - [xFormers](https://github.com/facebookresearch/xformers) or Memory-Efficient Attention supports models with the fp32 torch type. - C++ implementation of scaled dot product attention. SDPA is enabled by default for PyTorch 2.1.1+, but it can be explicitly enabled by setting `attn_implementation="sdpa"` in [`~PreTrainedModel.from_pretrained`]. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B", device_map="auto", attn_implementation="sdpa") ```
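To tie the guide together, the sketch below combines the [`TrainingArguments`] options discussed above into one small training run. It is an illustrative example rather than a reference recipe: the model and dataset are arbitrary small choices, `optim="adamw_bnb_8bit"` assumes bitsandbytes is installed, and `bf16=True` assumes an Ampere-or-newer GPU (use `fp16=True` otherwise).

```py
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# small causal LM and dataset chosen only to keep the sketch runnable
model_id = "distilbert/distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="sdpa")

dataset = load_dataset("rotten_tomatoes", split="train")
dataset = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=128),
    batched=True,
    remove_columns=dataset.column_names,
)

args = TrainingArguments(
    output_dir="gpu-training-demo",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=16,   # effective batch size of 64
    gradient_checkpointing=True,
    bf16=True,                        # assumes Ampere or newer, otherwise use fp16=True
    optim="adamw_bnb_8bit",           # assumes bitsandbytes is installed
    dataloader_pin_memory=True,
    dataloader_num_workers=4,
    torch_empty_cache_steps=4,
    torch_compile=True,
    torch_compile_backend="inductor",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False),
)
trainer.train()
```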
transformers/docs/source/en/perf_train_gpu_one.md/0
{ "file_path": "transformers/docs/source/en/perf_train_gpu_one.md", "repo_id": "transformers", "token_count": 4509 }
354
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quickstart [[open-in-colab]] Transformers is designed to be fast and easy to use so that everyone can start learning or building with transformer models. The number of user-facing abstractions is limited to only three classes for instantiating a model, and two APIs for inference or training. This quickstart introduces you to Transformers' key features and shows you how to: - load a pretrained model - run inference with [`Pipeline`] - fine-tune a model with [`Trainer`] ## Set up To start, we recommend creating a Hugging Face [account](https://hf.co/join). An account lets you host and access version controlled models, datasets, and [Spaces](https://hf.co/spaces) on the Hugging Face [Hub](https://hf.co/docs/hub/index), a collaborative platform for discovery and building. Create a [User Access Token](https://hf.co/docs/hub/security-tokens#user-access-tokens) and log in to your account. <hfoptions id="authenticate"> <hfoption id="notebook"> Paste your User Access Token into [`~huggingface_hub.notebook_login`] when prompted to log in. ```py from huggingface_hub import notebook_login notebook_login() ``` </hfoption> <hfoption id="CLI"> Make sure the [huggingface_hub[cli]](https://huggingface.co/docs/huggingface_hub/guides/cli#getting-started) package is installed and run the command below. Paste your User Access Token when prompted to log in. ```bash hf auth login ``` </hfoption> </hfoptions> Install a machine learning framework. <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash !pip install torch ``` </hfoption> <hfoption id="TensorFlow"> ```bash !pip install tensorflow ``` </hfoption> </hfoptions> Then install an up-to-date version of Transformers and some additional libraries from the Hugging Face ecosystem for accessing datasets and vision models, evaluating training, and optimizing training for large models. ```bash !pip install -U transformers datasets evaluate accelerate timm ``` ## Pretrained models Each pretrained model inherits from three base classes. | **Class** | **Description** | |---|---| | [`PretrainedConfig`] | A file that specifies a models attributes such as the number of attention heads or vocabulary size. | | [`PreTrainedModel`] | A model (or architecture) defined by the model attributes from the configuration file. A pretrained model only returns the raw hidden states. For a specific task, use the appropriate model head to convert the raw hidden states into a meaningful result (for example, [`LlamaModel`] versus [`LlamaForCausalLM`]). | | Preprocessor | A class for converting raw inputs (text, images, audio, multimodal) into numerical inputs to the model. For example, [`PreTrainedTokenizer`] converts text into tensors and [`ImageProcessingMixin`] converts pixels into tensors. 
| We recommend using the [AutoClass](./model_doc/auto) API to load models and preprocessors because it automatically infers the appropriate architecture for each task and machine learning framework based on the name or path to the pretrained weights and configuration file. Use [`~PreTrainedModel.from_pretrained`] to load the weights and configuration file from the Hub into the model and preprocessor class. <hfoptions id="base-classes"> <hfoption id="PyTorch"> When you load a model, configure the following parameters to ensure the model is optimally loaded. - `device_map="auto"` automatically allocates the model weights to your fastest device first. - `dtype="auto"` directly initializes the model weights in the data type they're stored in, which can help avoid loading the weights twice (PyTorch loads weights in `torch.float32` by default). ```py from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", dtype="auto", device_map="auto") tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") ``` Tokenize the text and return PyTorch tensors with the tokenizer. Move the model to an accelerator if it's available to accelerate inference. ```py model_inputs = tokenizer(["The secret to baking a good cake is "], return_tensors="pt").to(model.device) ``` The model is now ready for inference or training. For inference, pass the tokenized inputs to [`~GenerationMixin.generate`] to generate text. Decode the token ids back into text with [`~PreTrainedTokenizerBase.batch_decode`]. ```py generated_ids = model.generate(**model_inputs, max_length=30) tokenizer.batch_decode(generated_ids)[0] '<s> The secret to baking a good cake is 100% in the preparation. There are so many recipes out there,' ``` </hfoption> <hfoption id="TensorFlow"> ```py from transformers import TFAutoModelForCausalLM, AutoTokenizer model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2-xl") tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-xl") ``` Tokenize the text and return TensorFlow tensors with the tokenizer. ```py model_inputs = tokenizer(["The secret to baking a good cake is "], return_tensors="tf") ``` The model is now ready for inference or training. For inference, pass the tokenized inputs to [`~GenerationMixin.generate`] to generate text. Decode the token ids back into text with [`~PreTrainedTokenizerBase.batch_decode`]. ```py generated_ids = model.generate(**model_inputs, max_length=30) tokenizer.batch_decode(generated_ids)[0] 'The secret to baking a good cake is \xa0to use the right ingredients. \xa0The secret to baking a good cake is to use the right' ``` </hfoption> </hfoptions> > [!TIP] > Skip ahead to the [Trainer](#trainer-api) section to learn how to fine-tune a model. ## Pipeline The [`Pipeline`] class is the most convenient way to inference with a pretrained model. It supports many tasks such as text generation, image segmentation, automatic speech recognition, document question answering, and more. > [!TIP] > Refer to the [Pipeline](./main_classes/pipelines) API reference for a complete list of available tasks. Create a [`Pipeline`] object and select a task. By default, [`Pipeline`] downloads and caches a default pretrained model for a given task. Pass the model name to the `model` parameter to choose a specific model. <hfoptions id="pipeline-tasks"> <hfoption id="text generation"> Use [`~infer_device`] to automatically detect an available accelerator for inference. 
```py from transformers import pipeline, infer_device device = infer_device() pipeline = pipeline("text-generation", model="meta-llama/Llama-2-7b-hf", device=device) ``` Prompt [`Pipeline`] with some initial text to generate more text. ```py pipeline("The secret to baking a good cake is ", max_length=50) [{'generated_text': 'The secret to baking a good cake is 100% in the batter. The secret to a great cake is the icing.\nThis is why we’ve created the best buttercream frosting reci'}] ``` </hfoption> <hfoption id="image segmentation"> Use [`~infer_device`] to automatically detect an available accelerator for inference. ```py from transformers import pipeline, infer_device device = infer_device() pipeline = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic", device=device) ``` Pass an image - a URL or local path to the image - to [`Pipeline`]. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"/> </div> ```py segments = pipeline("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") segments[0]["label"] 'bird' segments[1]["label"] 'bird' ``` </hfoption> <hfoption id="automatic speech recognition"> Use [`~infer_device`] to automatically detect an available accelerator for inference. ```py from transformers import pipeline, infer_device device = infer_device() pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3", device=device) ``` Pass an audio file to [`Pipeline`]. ```py pipeline("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'} ``` </hfoption> </hfoptions> ## Trainer [`Trainer`] is a complete training and evaluation loop for PyTorch models. It abstracts away a lot of the boilerplate usually involved in manually writing a training loop, so you can start training faster and focus on training design choices. You only need a model, dataset, a preprocessor, and a data collator to build batches of data from the dataset. Use the [`TrainingArguments`] class to customize the training process. It provides many options for training, evaluation, and more. Experiment with training hyperparameters and features like batch size, learning rate, mixed precision, torch.compile, and more to meet your training needs. You could also use the default training parameters to quickly produce a baseline. Load a model, tokenizer, and dataset for training. ```py from transformers import AutoModelForSequenceClassification, AutoTokenizer from datasets import load_dataset model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") dataset = load_dataset("rotten_tomatoes") ``` Create a function to tokenize the text and convert it into PyTorch tensors. Apply this function to the whole dataset with the [`~datasets.Dataset.map`] method. ```py def tokenize_dataset(dataset): return tokenizer(dataset["text"]) dataset = dataset.map(tokenize_dataset, batched=True) ``` Load a data collator to create batches of data and pass the tokenizer to it. ```py from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` Next, set up [`TrainingArguments`] with the training features and hyperparameters. 
```py from transformers import TrainingArguments training_args = TrainingArguments( output_dir="distilbert-rotten-tomatoes", learning_rate=2e-5, per_device_train_batch_size=8, per_device_eval_batch_size=8, num_train_epochs=2, push_to_hub=True, ) ``` Finally, pass all these separate components to [`Trainer`] and call [`~Trainer.train`] to start. ```py from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], eval_dataset=dataset["test"], tokenizer=tokenizer, data_collator=data_collator, ) trainer.train() ``` Share your model and tokenizer to the Hub with [`~Trainer.push_to_hub`]. ```py trainer.push_to_hub() ``` Congratulations, you just trained your first model with Transformers! ### TensorFlow > [!WARNING] > Not all pretrained models are available in TensorFlow. Refer to a models API doc to check whether a TensorFlow implementation is supported. [`Trainer`] doesn't work with TensorFlow models, but you can still train a Transformers model implemented in TensorFlow with [Keras](https://keras.io/). Transformers TensorFlow models are a standard [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model), which is compatible with Keras' [compile](https://keras.io/api/models/model_training_apis/#compile-method) and [fit](https://keras.io/api/models/model_training_apis/#fit-method) methods. Load a model, tokenizer, and dataset for training. ```py from transformers import TFAutoModelForSequenceClassification, AutoTokenizer model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Create a function to tokenize the text and convert it into TensorFlow tensors. Apply this function to the whole dataset with the [`~datasets.Dataset.map`] method. ```py def tokenize_dataset(dataset): return tokenizer(dataset["text"]) dataset = dataset.map(tokenize_dataset) ``` Transformers provides the [`~TFPreTrainedModel.prepare_tf_dataset`] method to collate and batch a dataset. ```py tf_dataset = model.prepare_tf_dataset( dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ) ``` Finally, call [compile](https://keras.io/api/models/model_training_apis/#compile-method) to configure the model for training and [fit](https://keras.io/api/models/model_training_apis/#fit-method) to start. ```py from tensorflow.keras.optimizers import Adam model.compile(optimizer="adam") model.fit(tf_dataset) ``` ## Next steps Now that you have a better understanding of Transformers and what it offers, it's time to keep exploring and learning what interests you the most. - **Base classes**: Learn more about the configuration, model and processor classes. This will help you understand how to create and customize models, preprocess different types of inputs (audio, images, multimodal), and how to share your model. - **Inference**: Explore the [`Pipeline`] further, inference and chatting with LLMs, agents, and how to optimize inference with your machine learning framework and hardware. - **Training**: Study the [`Trainer`] in more detail, as well as distributed training and optimizing training on specific hardware. - **Quantization**: Reduce memory and storage requirements with quantization and speed up inference by representing weights with fewer bits. - **Resources**: Looking for end-to-end recipes for how to train and inference with a model for a specific task? Check out the task recipes!
transformers/docs/source/en/quicktour.md/0
{ "file_path": "transformers/docs/source/en/quicktour.md", "repo_id": "transformers", "token_count": 4226 }
355
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Causal language modeling [[open-in-colab]] There are two types of language modeling, causal and masked. This guide illustrates causal language modeling. Causal language models are frequently used for text generation. You can use these models for creative applications like choosing your own text adventure or an intelligent coding assistant like Copilot or CodeParrot. <Youtube id="Vpjb1lu0MDk"/> Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on the left. This means the model cannot see future tokens. GPT-2 is an example of a causal language model. This guide will show you how to: 1. Finetune [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) on the [r/askscience](https://www.reddit.com/r/askscience/) subset of the [ELI5](https://huggingface.co/datasets/dany0407/eli5_category) dataset. 2. Use your finetuned model for inference. <Tip> To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/text-generation) </Tip> Before you begin, make sure you have all the necessary libraries installed: ```bash pip install transformers datasets evaluate ``` We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load ELI5 dataset Start by loading the first 5000 examples from the [ELI5-Category](https://huggingface.co/datasets/dany0407/eli5_category) dataset with the 🤗 Datasets library. This'll give you a chance to experiment and make sure everything works before spending more time training on the full dataset. ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("dany0407/eli5_category", split="train[:5000]") ``` Split the dataset's `train` split into a train and test set with the [`~datasets.Dataset.train_test_split`] method: ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` Then take a look at an example: ```py >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. 
- Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'score': [21, 19, 5, 3], 'text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]}, 'title_urls': ['url'], 'selftext_urls': ['url']} ``` While this may look like a lot, you're only really interested in the `text` field. What's cool about language modeling tasks is you don't need labels (also known as an unsupervised task) because the next word *is* the label. ## Preprocess <Youtube id="ma1TrR7gE7I"/> The next step is to load a DistilGPT2 tokenizer to process the `text` subfield: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") ``` You'll notice from the example above, the `text` field is actually nested inside `answers`. This means you'll need to extract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process#flatten) method: ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'answers.score': [21, 19, 5, 3], 'answers.text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']], 'title_urls': ['url'], 'selftext_urls': ['url']} ``` Each subfield is now a separate column as indicated by the `answers` prefix, and the `text` field is a list now. Instead of tokenizing each sentence separately, convert the list to a string so you can jointly tokenize them. Here is a first preprocessing function to join the list of strings for each example and tokenize the result: ```py >>> def preprocess_function(examples): ... 
return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` To apply this preprocessing function over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.map`] method. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once, and increasing the number of processes with `num_proc`. Remove any columns you don't need: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` This dataset contains the token sequences, but some of these are longer than the maximum input length for the model. You can now use a second preprocessing function to - concatenate all the sequences - split the concatenated sequences into shorter chunks defined by `block_size`, which should be both shorter than the maximum input length and short enough for your GPU RAM. ```py >>> block_size = 128 >>> def group_texts(examples): ... # Concatenate all texts. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can ... # customize this part to your needs. ... if total_length >= block_size: ... total_length = (total_length // block_size) * block_size ... # Split by chunks of block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... result["labels"] = result["input_ids"].copy() ... return result ``` Apply the `group_texts` function over the entire dataset: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` Now create a batch of examples using [`DataCollatorForLanguageModeling`]. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length. <frameworkcontent> <pt> Use the end-of-sequence token as the padding token and set `mlm=False`. This will use the inputs as labels shifted to the right by one element: ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) ``` </pt> <tf> Use the end-of-sequence token as the padding token and set `mlm=False`. This will use the inputs as labels shifted to the right by one element: ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf") ``` </tf> </frameworkcontent> ## Train <frameworkcontent> <pt> <Tip> If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the [basic tutorial](../training#train-with-pytorch-trainer)! </Tip> You're ready to start training your model now! Load DistilGPT2 with [`AutoModelForCausalLM`]: ```py >>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` At this point, only three steps remain: 1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). 2. 
Pass the training arguments to [`Trainer`] along with the model, datasets, and data collator. 3. Call [`~Trainer.train`] to finetune your model. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_clm-model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... tokenizer=tokenizer, ... ) >>> trainer.train() ``` Once training is completed, use the [`~transformers.Trainer.evaluate`] method to evaluate your model and get its perplexity: ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 49.61 ``` Then share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> If you aren't familiar with finetuning a model with Keras, take a look at the [basic tutorial](../training#train-a-tensorflow-model-with-keras)! </Tip> To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` Then you can load DistilGPT2 with [`TFAutoModelForCausalLM`]: ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # No loss argument! ``` This can be done by specifying where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_clm-model", ... tokenizer=tokenizer, ... ) ``` Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callback to finetune the model: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` Once training is completed, your model is automatically uploaded to the Hub so everyone can use it! </tf> </frameworkcontent> <Tip> For a more in-depth example of how to finetune a model for causal language modeling, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). 
</Tip> ## Inference Great, now that you've finetuned a model, you can use it for inference! Come up with a prompt you'd like to generate text from: ```py >>> prompt = "Somatic hypermutation allows the immune system to" ``` The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for text generation with your model, and pass your text to it: ```py >>> from transformers import pipeline >>> generator = pipeline("text-generation", model="username/my_awesome_eli5_clm-model") >>> generator(prompt) [{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}] ``` <frameworkcontent> <pt> Tokenize the text and return the `input_ids` as PyTorch tensors: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="pt").input_ids ``` Use the [`~generation.GenerationMixin.generate`] method to generate text. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text generation strategies](../generation_strategies) page. ```py >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model") >>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` Decode the generated token ids back into text: ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"] ``` </pt> <tf> Tokenize the text and return the `input_ids` as TensorFlow tensors: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="tf").input_ids ``` Use the [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] method to create the summarization. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text generation strategies](../generation_strategies) page. ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model") >>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` Decode the generated token ids back into text: ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. 
Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for'] ``` </tf> </frameworkcontent>
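Whichever framework you use, the prompt → tokenize → `generate` → decode steps above always appear together. If you generate repeatedly from the same checkpoint, one convenient pattern (shown here as a rough PyTorch sketch that reuses the placeholder checkpoint name `username/my_awesome_eli5_clm-model` from this guide) is to collect the sampling settings in a [`~transformers.GenerationConfig`] and wrap the whole flow in a small helper:

```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

>>> checkpoint = "username/my_awesome_eli5_clm-model"  # placeholder name used throughout this guide
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> # Keep the sampling settings in one place instead of repeating them on every call
>>> generation_config = GenerationConfig(max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)

>>> def generate_text(prompt):
...     inputs = tokenizer(prompt, return_tensors="pt").input_ids
...     outputs = model.generate(inputs, generation_config=generation_config)
...     return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

>>> generate_text("Somatic hypermutation allows the immune system to")
```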
transformers/docs/source/en/tasks/language_modeling.md/0
{ "file_path": "transformers/docs/source/en/tasks/language_modeling.md", "repo_id": "transformers", "token_count": 5554 }
356
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# Visual document retrieval

Documents can contain multimodal data if they include charts, tables, and visuals in addition to text. Retrieving information from these documents is challenging because text retrieval models alone can't handle visual data and image retrieval models lack the granularity and document processing capabilities. Visual document retrieval can help retrieve information from all types of documents, including in multimodal retrieval augmented generation (RAG) pipelines. These models accept documents (as images) and text queries and calculate the similarity scores between them.

This guide demonstrates how to index and retrieve documents with [ColPali](../model_doc/colpali).

> [!TIP]
> For large scale use cases, you may want to index and retrieve documents with a vector database.

Make sure Transformers and Datasets are installed.

```bash
pip install -q datasets transformers
```

We will index a dataset of documents related to UFO sightings and filter out the examples where our column of interest is missing. The dataset contains several columns; we are interested in `specific_detail_query`, which contains a short summary of each document, and `image`, which contains the documents themselves.

```python
from datasets import load_dataset

dataset = load_dataset("davanstrien/ufo-ColPali")
dataset = dataset["train"]
dataset = dataset.filter(lambda example: example["specific_detail_query"] is not None)
dataset
```

```
Dataset({
    features: ['image', 'raw_queries', 'broad_topical_query', 'broad_topical_explanation', 'specific_detail_query', 'specific_detail_explanation', 'visual_element_query', 'visual_element_explanation', 'parsed_into_json'],
    num_rows: 2172
})
```

Let's load the model and the processor.

```python
import torch
from transformers import ColPaliForRetrieval, ColPaliProcessor, infer_device

device = infer_device()
model_name = "vidore/colpali-v1.2-hf"

processor = ColPaliProcessor.from_pretrained(model_name)

model = ColPaliForRetrieval.from_pretrained(
    model_name,
    dtype=torch.bfloat16,
    device_map="auto",
).eval()
```

Pass the text query to the processor and return the indexed text embeddings from the model. For image-to-text search, replace the `text` parameter in [`ColPaliProcessor`] with the `images` parameter to pass images.

```python
inputs = processor(text="a document about Mars expedition").to(model.device)

with torch.no_grad():
    text_embeds = model(**inputs, return_tensors="pt").embeddings
```

Index the images offline, and during inference, return the query text embeddings to get their closest image embeddings. Store the images and image embeddings by writing them to the dataset with [`~datasets.Dataset.map`] as shown below. Add an `embeddings` column that contains the indexed embeddings.
ColPali embeddings take up a lot of storage, so move them off the accelerator and store them on the CPU as NumPy vectors.

```python
ds_with_embeddings = dataset.map(lambda example: {'embeddings': model(**processor(images=example["image"]).to(device), return_tensors="pt").embeddings.to(torch.float32).detach().cpu().numpy()})
```

For online inference, create a function to search the image embeddings in batches and retrieve the k most relevant images. The function below returns the indices in the dataset and their scores for a given indexed dataset, text embeddings, number of top results, and the batch size.

```python
def find_top_k_indices_batched(dataset, text_embedding, processor, k=10, batch_size=4):
    scores_and_indices = []
    for start_idx in range(0, len(dataset), batch_size):
        end_idx = min(start_idx + batch_size, len(dataset))
        batch = dataset[start_idx:end_idx]
        batch_embeddings = [torch.tensor(emb[0], dtype=torch.float32) for emb in batch["embeddings"]]
        scores = processor.score_retrieval(text_embedding.to("cpu").to(torch.float32), batch_embeddings)
        if hasattr(scores, "tolist"):
            scores = scores.tolist()[0]
        for i, score in enumerate(scores):
            scores_and_indices.append((score, start_idx + i))
    sorted_results = sorted(scores_and_indices, key=lambda x: -x[0])
    topk = sorted_results[:k]
    indices = [idx for _, idx in topk]
    scores = [score for score, _ in topk]
    return indices, scores
```

Generate the text embeddings and pass them to the function above to return the dataset indices and scores.

```python
with torch.no_grad():
    text_embeds = model(**processor(text="a document about Mars expedition").to(model.device), return_tensors="pt").embeddings

indices, scores = find_top_k_indices_batched(ds_with_embeddings, text_embeds, processor, k=3, batch_size=4)
print(indices, scores)
```

```
([440, 442, 443], [14.370786666870117, 13.675487518310547, 12.9899320602417])
```

Display the images to view the Mars-related documents.

```python
for i in indices:
    display(dataset[i]["image"])
```

<div style="display: flex; align-items: center;">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/doc_1.png" alt="Document 1" style="height: 200px; object-fit: contain; margin-right: 10px;">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/doc_2.png" alt="Document 2" style="height: 200px; object-fit: contain;">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/doc_3.png" alt="Document 3" style="height: 200px; object-fit: contain;">
</div>
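Recomputing the embeddings for every session can be slow. Although it isn't part of the steps above, one option is to persist the indexed dataset with 🤗 Datasets and reload it later; the path below is only an example.

```python
# Save the indexed dataset (including the "embeddings" column) to disk
ds_with_embeddings.save_to_disk("ufo-colpali-indexed")  # example path

# In a later session, reload it and reuse the same retrieval function
from datasets import load_from_disk

ds_with_embeddings = load_from_disk("ufo-colpali-indexed")
indices, scores = find_top_k_indices_batched(ds_with_embeddings, text_embeds, processor, k=3, batch_size=4)
```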
transformers/docs/source/en/tasks/visual_document_retrieval.md/0
{ "file_path": "transformers/docs/source/en/tasks/visual_document_retrieval.md", "repo_id": "transformers", "token_count": 2006 }
357
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Tour rápido - local: installation title: Instalación title: Empezar - sections: - local: pipeline_tutorial title: Pipelines para inferencia - local: autoclass_tutorial title: Carga instancias preentrenadas con un AutoClass - local: preprocessing title: Preprocesamiento - local: training title: Fine-tuning a un modelo pre-entrenado - local: accelerate title: Entrenamiento distribuido con 🤗 Accelerate - local: model_sharing title: Compartir un modelo title: Tutoriales - sections: - isExpanded: false sections: - local: tasks/question_answering title: Respuesta a preguntas - local: tasks/language_modeling title: Modelado de lenguaje - local: tasks/summarization title: Generación de resúmenes - local: tasks/multiple_choice title: Selección múltiple - local: tasks/image_captioning title: Subtítulos de imágenes title: Procesamiento del Lenguaje Natural - isExpanded: false sections: - local: tasks/asr title: Reconocimiento automático del habla - local: tasks/audio_classification title: Clasificación de audio title: Audio - isExpanded: false sections: - local: tasks/image_classification title: Clasificación de imágenes title: Visión Artificial title: Guías prácticas - sections: - local: fast_tokenizers title: Usa tokenizadores de 🤗 Tokenizers - local: multilingual title: Modelos multilingües para inferencia - local: create_a_model title: Crea una arquitectura personalizada - local: custom_models title: Compartir modelos personalizados - local: run_scripts title: Entrenamiento con scripts - local: chat_templating title: Plantillas para Modelos de Chat - local: trainer title: Entrenador - local: sagemaker title: Ejecutar el entrenamiento en Amazon SageMaker - local: converting_tensorflow_models title: Convertir checkpoints de TensorFlow - local: serialization title: Exportar a ONNX - local: torchscript title: Exportar a TorchScript - local: community title: Los recursos de la comunidad title: Guías para desarrolladores - sections: - local: performance title: Descripción general - local: debugging title: Debugging title: Rendimiento y escalabilidad - sections: - local: add_new_pipeline title: ¿Cómo puedo añadir un pipeline a 🤗 Transformers? - local: pr_checks title: Verificaciones en un Pull Request title: Contribuir - sections: - local: philosophy title: Filosofía - local: glossary title: Glosario - local: task_summary title: Lo que 🤗 Transformers puede hacer - local: tasks_explained title: Como los 🤗 Transformers resuelven tareas - local: tokenizer_summary title: Descripción general de los tokenizadores - local: attention title: Mecanismos de atención - local: pad_truncation title: Relleno y truncamiento - local: bertology title: BERTología - local: perplexity title: Perplejidad de los modelos de longitud fija - local: pipeline_webserver title: Flujo de trabajo para la inferencia de los servidores web - local: model_memory_anatomy title: Anatomía del entrenamiento de los modelos title: Guías conceptuales
transformers/docs/source/es/_toctree.yml/0
{ "file_path": "transformers/docs/source/es/_toctree.yml", "repo_id": "transformers", "token_count": 1230 }
358
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Anatomía del entrenamiento de los modelos Para entender las técnicas de optimización del rendimiento que se pueden aplicar para mejorar la eficiencia en la velocidad del entrenamiento de los modelos y la utilización de la memoria, es útil familiarizarse con cómo se utiliza la GPU durante el entrenamiento y cómo varía la intensidad de cálculo según la operación realizada. Empecemos explorando un ejemplo enfocado en la utilización de la GPU y la ejecución del entrenamiento de un modelo. Para la demostración, necesitaremos instalar algunas bibliotecas: ```bash pip install transformers datasets accelerate nvidia-ml-py3 ``` La biblioteca `nvidia-ml-py3` nos permite monitorear la utilización de memoria de los modelos desde Python. Es posible que estés familiarizado con el comando `nvidia-smi` en la terminal, esta biblioteca nos permite acceder a la misma información en Python directamente. Luego, creamos algunos datos ficticios: IDs de tokens aleatorios entre 100 y 30000 y etiquetas binarias para un clasificador. En total, obtenemos 512 secuencias cada una con longitud 512 y las almacenamos en un [`~datasets.Dataset`] con formato PyTorch. ```py >>> import numpy as np >>> from datasets import Dataset >>> seq_len, dataset_size = 512, 512 >>> dummy_data = { ... "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)), ... "labels": np.random.randint(0, 1, (dataset_size)), ... } >>> ds = Dataset.from_dict(dummy_data) >>> ds.set_format("pt") ``` Para imprimir estadísticas resumidas para la utilización de la GPU y la ejecución del entrenamiento con [`Trainer`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.Trainer), definimos dos funciones auxiliares: ```py >>> from pynvml import * >>> def print_gpu_utilization(): ... nvmlInit() ... handle = nvmlDeviceGetHandleByIndex(0) ... info = nvmlDeviceGetMemoryInfo(handle) ... print(f"GPU memory occupied: {info.used//1024**2} MB.") >>> def print_summary(result): ... print(f"Time: {result.metrics['train_runtime']:.2f}") ... print(f"Samples/second: {result.metrics['train_samples_per_second']:.2f}") ... print_gpu_utilization() ``` Comencemos comprobando que la memoria GPU este libre: ```py >>> print_gpu_utilization() GPU memory occupied: 0 MB. ``` Parece estar bien: la memoria de la GPU no está ocupada como esperaríamos antes de cargar cualquier modelo. Si no es el caso en tu máquina, asegúrate de detener todos los procesos que estén utilizando la memoria de la GPU. Sin embargo, no toda la memoria libre de la GPU puede ser utilizada por el usuario. Cuando se carga un modelo en la GPU, también se cargan los kernels, lo que puede ocupar 1-2GB de memoria. Para ver cuánta memoria será ocupada por defecto, cargemos un tensor diminuto en la GPU, lo que también desencadena la carga de los kernels. ```py >>> import torch >>> torch.ones((1, 1)).to("cuda") >>> print_gpu_utilization() GPU memory occupied: 1343 MB. 
``` Vemos que los kernels solos ocupan 1,3GB de memoria de la GPU. Ahora, veamos cuánto espacio ocupa el modelo. ## Cargar el Modelo Primero, cargamos el modelo `google-bert/bert-large-uncased`. Los pesos del modelo son cargados directamente en la GPU para que podamos verificar cuánto espacio ocupan solo los pesos. ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-large-uncased").to("cuda") >>> print_gpu_utilization() GPU memory occupied: 2631 MB. ``` Podemos ver que los pesos del modelo solos ocupan 1,3 GB de memoria de la GPU. El número exacto depende de la GPU específica que estés utilizando. Ten en cuenta que en GPUs más modernas, un modelo puede ocupar más espacio ya que los pesos se cargan de manera optimizada lo cual acelera el uso del modelo. Ahora también podemos verificar rápidamente si obtenemos el mismo resultado que con la CLI de `nvidia-smi`: ```bash nvidia-smi ``` ```bash Tue Jan 11 08:58:05 2022 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.91.03 Driver Version: 460.91.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla V100-SXM2... On | 00000000:00:04.0 Off | 0 | | N/A 37C P0 39W / 300W | 2631MiB / 16160MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 3721 C ...nvs/codeparrot/bin/python 2629MiB | +-----------------------------------------------------------------------------+ ``` Obtenemos el mismo número que antes y también puedes ver que estamos utilizando una GPU V100 con 16GB de memoria. Ahora podemos empezar a entrenar el modelo y ver cómo cambia el consumo de memoria de la GPU. Primero, configuramos algunos argumentos de entrenamiento estándar: ```py default_args = { "output_dir": "tmp", "eval_strategy": "steps", "num_train_epochs": 1, "log_level": "error", "report_to": "none", } ``` <Tip> Si planeas ejecutar varias pruebas, reinicie el kernel de Python entre cada prueba para borrar correctamente la memoria. </Tip> ## Utilización de la memoria en el entrenamiento Vamos a utilizar el [`Trainer`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.Trainer) y entrenar el modelo sin utilizar ninguna técnica de optimización del rendimiento de la GPU y un tamaño de lote de 4: ```py >>> from transformers import TrainingArguments, Trainer, logging >>> logging.set_verbosity_error() >>> training_args = TrainingArguments(per_device_train_batch_size=4, **default_args) >>> trainer = Trainer(model=model, args=training_args, train_dataset=ds) >>> result = trainer.train() >>> print_summary(result) ``` ``` Time: 57.82 Samples/second: 8.86 GPU memory occupied: 14949 MB. ``` Vemos que incluso un tamaño de lote relativamente pequeño casi llena toda la memoria de nuestra GPU. Sin embargo, un tamaño de lote más grande a menudo puede resultar en una convergencia del modelo más rápida o un mejor rendimiento final. 
Así que idealmente queremos ajustar el tamaño del lote a las necesidades del modelo y no a las limitaciones de la GPU. Lo interesante es que utilizamos mucha más memoria que el tamaño del modelo. Para entender un poco mejor por qué es el caso, echemos un vistazo a las operaciones y necesidades de memoria de un modelo. ## Anatomía de las Operaciones del Modelo La arquitectura de los transformers incluye 3 grupos principales de operaciones agrupadas a continuación por intensidad de cálculo. 1. **Contracciones de Tensores** Las capas lineales y componentes de la Atención Multi-Head realizan **multiplicaciones matriciales por lotes**. Estas operaciones son la parte más intensiva en cálculo del entrenamiento de los transformers. 2. **Normalizaciones Estadísticas** Softmax y normalización de capas son menos intensivas en cálculo que las contracciones de tensores, e implican una o más **operaciones de reducción**, cuyo resultado se aplica luego mediante un mapa. 3. **Operadores por Elemento** Estos son los operadores restantes: **sesgos, dropout, activaciones y conexiones residuales**. Estas son las operaciones menos intensivas en cálculo. Este conocimiento puede ser útil al analizar cuellos de botella de rendimiento. Este resumen se deriva de [Data Movement Is All You Need: A Case Study on Optimizing Transformers 2020](https://huggingface.co/papers/2007.00072) ## Anatomía de la Memoria del Modelo Hemos visto que al entrenar un modelo se utiliza mucha más memoria que solo poner el modelo en la GPU. Esto se debe a que hay muchos componentes durante el entrenamiento que utilizan memoria de la GPU. Los componentes en memoria de la GPU son los siguientes: 1. pesos del modelo 2. estados del optimizador 3. gradientes 4. activaciones hacia adelante guardadas para el cálculo del gradiente 5. buffers temporales 6. memoria específica de funcionalidad Un modelo típico entrenado en precisión mixta con AdamW requiere 18 bytes por parámetro del modelo más memoria de activación. Para la inferencia no hay estados del optimizador ni gradientes, por lo que podemos restarlos. Y así terminamos con 6 bytes por parámetro del modelo para la inferencia en precisión mixta, más la memoria de activación. Veámoslo a detalle: **Pesos del Modelo:** - 4 bytes por número de parámetros para entrenamiento en fp32 - 6 bytes por número de parámetros para entrenamiento en precisión mixta (mantiene un modelo en fp32 y uno en fp16 en memoria) **Estados del Optimizador:** - 8 bytes por número de parámetros para un AdamW normal (mantiene 2 estados) - 2 bytes por número de parámetros para optimizadores de 8 bits como [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) - 4 bytes por número de parámetros para optimizadores como SGD con momentum (mantiene solo 1 estado) **Gradientes** - 4 bytes por número de parámetros para entrenamiento en fp32 o precisión mixta (los gradientes siempre se mantienen en fp32) **Activaciones hacia Adelante** - El tamaño depende de muchos factores, los principales siendo la longitud de la secuencia, el tamaño oculto y el tamaño de lote. Hay entradas y salidas que se pasan y se devuelven por las funciones hacia adelante y hacia atrás, y las activaciones hacia adelante (*forward activations*) guardadas para el cálculo del gradiente. **Memoria Temporal** Además, hay todas clases de variables temporales que se liberan una vez que se completa el cálculo, pero en el momento podrían requerir memoria adicional y podrían provocar un error de memoria insuficiente. 
Por lo tanto, al codificar es crucial pensar estratégicamente sobre tales variables temporales y a veces liberarlas explícitamente tan pronto como ya no se necesitan. **Memoria Específica de Funcionalidad** Entonces, su software podría tener necesidades especiales de memoria. Por ejemplo, al generar texto mediante la búsqueda por haz, el software necesita mantener múltiples copias de las entradas y salidas. **Velocidad de Ejecución `forward` vs `backward`** Para convoluciones y capas lineales, hay 2x flops en la ejecución hacia atrás (`backward`) en comparación con la ejecución hacia adelante (`forward`), lo que generalmente se traduce en ~2x más lento (a veces más, porque los tamaños en la ejecución hacia atrás tienden a ser más complejos). Las activaciones suelen ser limitadas por ancho de banda, y es típico que una activación tenga que leer más datos en la ejecución hacia atrás que en la ejecución hacia adelante (por ejemplo, la activación hacia adelante lee una vez, escribe una vez, la activación hacia atrás lee dos veces, gradOutput y salida de la ejecución hacia adelante, y escribe una vez, gradInput). Como puedes ver, hay potencialmente unos pocos lugares donde podríamos ahorrar memoria de la GPU o acelerar operaciones. Ahora que entiendes qué afecta la utilización de la GPU y la velocidad de cálculo, consulta la página de documentación [Métodos y herramientas para entrenamiento eficiente en una sola GPU](https://huggingface.co/docs/transformers/perf_train_gpu_one) para aprender sobre técnicas de optimización del rendimiento.
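Si quieres convertir estas reglas generales en cifras concretas para tu propio modelo, un cálculo aproximado como el siguiente puede servir de punto de partida. Es solo un esquema orientativo: asume entrenamiento en precisión mixta con AdamW y no incluye las activaciones, los buffers temporales ni la memoria específica de funcionalidad.

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-large-uncased")
>>> num_params = model.num_parameters()

>>> # 18 bytes por parámetro: pesos (6) + estados del optimizador AdamW (8) + gradientes (4)
>>> entrenamiento_gb = num_params * 18 / 1024**3
>>> # 6 bytes por parámetro para inferencia en precisión mixta
>>> inferencia_gb = num_params * 6 / 1024**3

>>> print(f"Parámetros: {num_params / 1e6:.0f}M")
>>> print(f"Memoria estimada para entrenamiento (sin activaciones): {entrenamiento_gb:.1f} GB")
>>> print(f"Memoria estimada para inferencia (sin activaciones): {inferencia_gb:.1f} GB")
```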
transformers/docs/source/es/model_memory_anatomy.md/0
{ "file_path": "transformers/docs/source/es/model_memory_anatomy.md", "repo_id": "transformers", "token_count": 4605 }
359
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Reconocimiento automático del habla <Youtube id="TksaY_FDgnk"/> El reconocimiento automático del habla (ASR, por sus siglas en inglés) convierte una señal de habla en texto y mapea una secuencia de entradas de audio en salidas en forma de texto. Los asistentes virtuales como Siri y Alexa usan modelos de ASR para ayudar a sus usuarios todos los días. De igual forma, hay muchas otras aplicaciones, como la transcripción de contenidos en vivo y la toma automática de notas durante reuniones. En esta guía te mostraremos como: 1. Hacer fine-tuning al modelo [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) con el dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) para transcribir audio a texto. 2. Usar tu modelo ajustado para tareas de inferencia. <Tip> Revisa la [página de la tarea](https://huggingface.co/tasks/automatic-speech-recognition) de reconocimiento automático del habla para acceder a más información sobre los modelos, datasets y métricas asociados. </Tip> Antes de comenzar, asegúrate de haber instalado todas las librerías necesarias: ```bash pip install transformers datasets evaluate jiwer ``` Te aconsejamos iniciar sesión con tu cuenta de Hugging Face para que puedas subir tu modelo y comartirlo con la comunidad. Cuando te sea solicitado, ingresa tu token para iniciar sesión: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Cargar el dataset MInDS-14 Comencemos cargando un subconjunto más pequeño del dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) desde la biblioteca 🤗 Datasets. De esta forma, tendrás la oportunidad de experimentar y asegurarte de que todo funcione antes de invertir más tiempo entrenando con el dataset entero. ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]") ``` Divide la partición `train` (entrenamiento) en una partición de entrenamiento y una de prueba usando el método [`~Dataset.train_test_split`]: ```py >>> minds = minds.train_test_split(test_size=0.2) ``` Ahora échale un vistazo al dataset: ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 16 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 4 }) }) ``` Aunque el dataset contiene mucha información útil, como los campos `lang_id` (identificador del lenguaje) y `english_transcription` (transcripción al inglés), en esta guía nos enfocaremos en los campos `audio` y `transcription`. 
Puedes quitar las otras columnas con el método [`~datasets.Dataset.remove_columns`]: ```py >>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"]) ``` Vuelve a echarle un vistazo al ejemplo: ```py >>> minds["train"][0] {'audio': {'array': array([-0.00024414, 0. , 0. , ..., 0.00024414, 0.00024414, 0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 8000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` Hay dos campos: - `audio`: un `array` (arreglo) unidimensional de la señal de habla que debe ser invocado para cargar y re-muestrear el archivo de audio. - `transcription`: el texto objetivo. ## Preprocesamiento El siguiente paso es cargar un procesador Wav2Vec2 para procesar la señal de audio: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base") ``` El dataset MInDS-14 tiene una tasa de muestreo de 8000kHz (puedes encontrar esta información en su [tarjeta de dataset](https://huggingface.co/datasets/PolyAI/minds14)), lo que significa que tendrás que re-muestrear el dataset a 16000kHz para poder usar el modelo Wav2Vec2 pre-entrenado: ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ..., 2.78103951e-04, 2.38446111e-04, 1.18740834e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 16000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` Como puedes ver en el campo `transcription`, el texto contiene una mezcla de carácteres en mayúsculas y en minúsculas. El tokenizer Wav2Vec2 fue entrenado únicamente con carácteres en mayúsculas, así que tendrás que asegurarte de que el texto se ajuste al vocabulario del tokenizer: ```py >>> def uppercase(example): ... return {"transcription": example["transcription"].upper()} >>> minds = minds.map(uppercase) ``` Ahora vamos a crear una función de preprocesamiento que: 1. Invoque la columna `audio` para cargar y re-muestrear el archivo de audio. 2. Extraiga el campo `input_values` (valores de entrada) del archivo de audio y haga la tokenización de la columna `transcription` con el procesador. ```py >>> def prepare_dataset(batch): ... audio = batch["audio"] ... batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"]) ... batch["input_length"] = len(batch["input_values"][0]) ... return batch ``` Para aplicar la función de preprocesamiento a todo el dataset, puedes usar la función [`~datasets.Dataset.map`] de 🤗 Datasets. Para acelerar la función `map` puedes incrementar el número de procesos con el parámetro `num_proc`. 
Quita las columnas que no necesites con el método [`~datasets.Dataset.remove_columns`]:

```py
>>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4)
```

🤗 Transformers no tiene un collator de datos para la tarea de ASR, así que tendrás que adaptar el [`DataCollatorWithPadding`] para crear un lote de ejemplos. El collator también le aplicará padding dinámico a tu texto y etiquetas para que tengan la longitud del elemento más largo en su lote (en vez de la mayor longitud en el dataset entero), de forma que todas las muestras tengan una longitud uniforme. Aunque es posible hacerle padding a tu texto con el `tokenizer` haciendo `padding=True`, el padding dinámico es más eficiente.

A diferencia de otros collators de datos, este tiene que aplicarle un método de padding distinto a los campos `input_values` (valores de entrada) y `labels` (etiquetas):

```py
>>> import torch
>>> from dataclasses import dataclass, field
>>> from typing import Any, Dict, List, Optional, Union

>>> @dataclass
... class DataCollatorCTCWithPadding:
...     processor: AutoProcessor
...     padding: Union[bool, str] = "longest"

...     def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
...         # particiona las entradas y las etiquetas ya que tienen que tener longitudes distintas y
...         # requieren métodos de padding diferentes
...         input_features = [{"input_values": feature["input_values"][0]} for feature in features]
...         label_features = [{"input_ids": feature["labels"]} for feature in features]

...         batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")

...         labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")

...         # remplaza el padding con -100 para ignorar la pérdida de forma correcta
...         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

...         batch["labels"] = labels

...         return batch
```

Ahora puedes instanciar tu `DataCollatorCTCWithPadding`:

```py
>>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest")
```

## Evaluación

A menudo es útil incluir una métrica durante el entrenamiento para evaluar el rendimiento de tu modelo. Puedes cargar un método de evaluación rápidamente con la biblioteca 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index). Para esta tarea, puedes usar la métrica de [tasa de error por palabra](https://huggingface.co/spaces/evaluate-metric/wer) (WER, por sus siglas en inglés). Puedes ver la [guía rápida](https://huggingface.co/docs/evaluate/a_quick_tour) de 🤗 Evaluate para aprender más acerca de cómo cargar y computar una métrica.

```py
>>> import evaluate

>>> wer = evaluate.load("wer")
```

Ahora crea una función que le pase tus predicciones y etiquetas a [`~evaluate.EvaluationModule.compute`] para calcular la WER:

```py
>>> import numpy as np

>>> def compute_metrics(pred):
...     pred_logits = pred.predictions
...     pred_ids = np.argmax(pred_logits, axis=-1)

...     pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

...     pred_str = processor.batch_decode(pred_ids)
...     label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

...     wer_score = wer.compute(predictions=pred_str, references=label_str)

...     return {"wer": wer_score}
```

Ahora tu función `compute_metrics` (computar métricas) está lista y podrás usarla cuando estés preparando tu entrenamiento.
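Si quieres hacer una comprobación rápida de la métrica antes de entrenar, puedes llamar a `wer.compute` directamente con un par de transcripciones de ejemplo (las frases siguientes son solo ilustrativas). Dos transcripciones idénticas deberían dar una WER de 0:

```py
>>> predicciones_de_ejemplo = ["I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER"]
>>> referencias_de_ejemplo = ["I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER"]
>>> wer.compute(predictions=predicciones_de_ejemplo, references=referencias_de_ejemplo)
0.0
```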
## Entrenamiento <frameworkcontent> <pt> <Tip> Si no tienes experiencia haciéndole fine-tuning a un modelo con el [`Trainer`], ¡échale un vistazo al tutorial básico [aquí](../training#train-with-pytorch-trainer)! </Tip> ¡Ya puedes empezar a entrenar tu modelo! Para ello, carga Wav2Vec2 con [`AutoModelForCTC`]. Especifica la reducción que quieres aplicar con el parámetro `ctc_loss_reduction`. A menudo, es mejor usar el promedio en lugar de la sumatoria que se hace por defecto. ```py >>> from transformers import AutoModelForCTC, TrainingArguments, Trainer >>> model = AutoModelForCTC.from_pretrained( ... "facebook/wav2vec2-base", ... ctc_loss_reduction="mean", ... pad_token_id=processor.tokenizer.pad_token_id, ... ) ``` En este punto, solo quedan tres pasos: 1. Define tus hiperparámetros de entrenamiento en [`TrainingArguments`]. El único parámetro obligatorio es `output_dir` (carpeta de salida), el cual especifica dónde guardar tu modelo. Puedes subir este modelo al Hub haciendo `push_to_hub=True` (debes haber iniciado sesión en Hugging Face para subir tu modelo). Al final de cada época, el [`Trainer`] evaluará la WER y guardará el punto de control del entrenamiento. 2. Pásale los argumentos del entrenamiento al [`Trainer`] junto con el modelo, el dataset, el tokenizer, el collator de datos y la función `compute_metrics`. 3. Llama el método [`~Trainer.train`] para hacerle fine-tuning a tu modelo. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_asr_mind_model", ... per_device_train_batch_size=8, ... gradient_accumulation_steps=2, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=2000, ... gradient_checkpointing=True, ... fp16=True, ... group_by_length=True, ... eval_strategy="steps", ... per_device_eval_batch_size=8, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... load_best_model_at_end=True, ... metric_for_best_model="wer", ... greater_is_better=False, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... processing_class=processor.feature_extractor, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` Una vez que el entrenamiento haya sido completado, comparte tu modelo en el Hub con el método [`~transformers.Trainer.push_to_hub`] para que todo el mundo pueda usar tu modelo: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> Para ver un ejemplo más detallado de cómo hacerle fine-tuning a un modelo para reconocimiento automático del habla, échale un vistazo a esta [entrada de blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) para ASR en inglés y a esta [entrada](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) para ASR multilingüe. </Tip> ## Inferencia ¡Genial, ahora que le has hecho fine-tuning a un modelo, puedes usarlo para inferencia! Carga el archivo de audio sobre el cual quieras correr la inferencia. ¡Recuerda re-muestrar la tasa de muestreo del archivo de audio para que sea la misma del modelo si es necesario! ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` La manera más simple de probar tu modelo para hacer inferencia es usarlo en un [`pipeline`]. 
Puedes instanciar un `pipeline` para reconocimiento automático del habla con tu modelo y pasarle tu archivo de audio: ```py >>> from transformers import pipeline >>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model") >>> transcriber(audio_file) {'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'} ``` <Tip> La transcripción es decente, pero podría ser mejor. ¡Intenta hacerle fine-tuning a tu modelo con más ejemplos para obtener resultados aún mejores! </Tip> También puedes replicar de forma manual los resultados del `pipeline` si lo deseas: <frameworkcontent> <pt> Carga un procesador para preprocesar el archivo de audio y la transcripción y devuelve el `input` como un tensor de PyTorch: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` Pásale tus entradas al modelo y devuelve los logits: ```py >>> from transformers import AutoModelForCTC >>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` Obtén los identificadores de los tokens con mayor probabilidad en las predicciones y usa el procesador para decodificarlos y transformarlos en texto: ```py >>> import torch >>> predicted_ids = torch.argmax(logits, dim=-1) >>> transcription = processor.batch_decode(predicted_ids) >>> transcription ['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'] ``` </pt> </frameworkcontent>
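Para audios más largos que unos pocos segundos (por ejemplo, grabaciones de varios minutos), el [`pipeline`] puede dividir la señal en fragmentos y combinar las transcripciones con el parámetro `chunk_length_s`. Este es solo un esquema orientativo que reutiliza el modelo de ejemplo de esta guía:

```py
>>> from transformers import pipeline

>>> transcriber = pipeline(
...     "automatic-speech-recognition",
...     model="stevhliu/my_awesome_asr_minds_model",
...     chunk_length_s=30,
... )
>>> transcriber(audio_file)
```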
transformers/docs/source/es/tasks/asr.md/0
{ "file_path": "transformers/docs/source/es/tasks/asr.md", "repo_id": "transformers", "token_count": 6031 }
360
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# Come creare una pipeline personalizzata?

In questa guida, scopriremo come creare una pipeline personalizzata e condividerla sull' [Hub](https://hf.co/models) o aggiungerla nella libreria Transformers.

Innanzitutto, è necessario decidere gli input grezzi che la pipeline sarà in grado di accettare. Possono essere strings, raw bytes, dictionaries o qualsiasi altro input plausibile. Cerca di mantenere questi input il più possibile in Python in quanto facilita la compatibilità (anche con altri linguaggi tramite JSON). Questi saranno gli `inputs` della pipeline (`preprocess`).

Poi definire gli `outputs`. Stessa strategia degli `inputs`. Più è semplice e meglio è. Questi saranno gli output del metodo `postprocess`.

Si parte ereditando la classe base `Pipeline`, con i 4 metodi che bisogna implementare: `preprocess`, `_forward`, `postprocess` e `_sanitize_parameters`.

```python
from transformers import Pipeline


class MyPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "maybe_arg" in kwargs:
            preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, inputs, maybe_arg=2):
        model_input = Tensor(inputs["input_ids"])
        return {"model_input": model_input}

    def _forward(self, model_inputs):
        # model_inputs == {"model_input": model_input}
        outputs = self.model(**model_inputs)
        # Maybe {"logits": Tensor(...)}
        return outputs

    def postprocess(self, model_outputs):
        best_class = model_outputs["logits"].softmax(-1)
        return best_class
```

La struttura di questa suddivisione consiste nel supportare in modo relativamente continuo CPU/GPU, supportando allo stesso tempo l'esecuzione di pre/postelaborazione sulla CPU su thread diversi.

`preprocess` prenderà gli input originariamente definiti e li trasformerà in qualcosa di alimentabile dal modello. Potrebbe contenere più informazioni e di solito è un `Dict`.

`_forward` è il dettaglio dell'implementazione e non è destinato a essere chiamato direttamente. `forward` è il metodo preferito per assicurarsi che tutto funzioni correttamente perché contiene delle salvaguardie. Se qualcosa è collegato a un modello reale, appartiene al metodo `_forward`, tutto il resto è nel preprocess/postprocess.

`postprocess` prende l'output di `_forward` e lo trasforma nell'output finale che era stato deciso in precedenza.

`_sanitize_parameters` esiste per consentire agli utenti di passare i parametri ogni volta che desiderano, sia a initialization time `pipeline(...., maybe_arg=4)` che al call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.

`_sanitize_parameters` ritorna 3 dicts di kwargs che vengono passati direttamente a `preprocess`, `_forward` e `postprocess`. Non riempire nulla se il chiamante non ha chiamato con alcun parametro aggiuntivo.
Questo consente di mantenere gli argomenti predefiniti nella definizione della funzione, che è sempre più "naturale".

Un esempio classico potrebbe essere l'argomento `top_k` nel post processing dei classification tasks.

```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05},
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]

>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```

Per ottenere questo risultato, aggiorniamo il metodo `postprocess` con un valore di default di `5` per il nuovo parametro e modifichiamo `_sanitize_parameters` per consentirlo.

```python
def postprocess(self, model_outputs, top_k=5):
    best_class = model_outputs["logits"].softmax(-1)
    # Add logic to handle top_k
    return best_class


def _sanitize_parameters(self, **kwargs):
    preprocess_kwargs = {}
    if "maybe_arg" in kwargs:
        preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]

    postprocess_kwargs = {}
    if "top_k" in kwargs:
        postprocess_kwargs["top_k"] = kwargs["top_k"]
    return preprocess_kwargs, {}, postprocess_kwargs
```

Cercare di mantenere gli input/output molto semplici e idealmente serializzabili in JSON, in quanto ciò rende l'uso della pipeline molto facile senza richiedere agli utenti di comprendere nuovi tipi di oggetti. È anche relativamente comune supportare molti tipi di argomenti per facilitarne l'uso (ad esempio i file audio possono essere nomi di file, URL o byte puri).

## Aggiungilo alla lista dei tasks supportati

Per registrare il tuo `new-task` nella lista dei tasks supportati, devi aggiungerlo al `PIPELINE_REGISTRY`:

```python
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "new-task",
    pipeline_class=MyPipeline,
    pt_model=AutoModelForSequenceClassification,
)
```

Puoi specificare il modello di default che desideri, in questo caso dovrebbe essere accompagnato da una revisione specifica (che può essere il nome di un branch o l'hash di un commit, in questo caso abbiamo preso `"abcdef"`) e anche dal type:

```python
PIPELINE_REGISTRY.register_pipeline(
    "new-task",
    pipeline_class=MyPipeline,
    pt_model=AutoModelForSequenceClassification,
    default={"pt": ("user/awesome_model", "abcdef")},
    type="text",  # current support type: text, audio, image, multimodal
)
```

## Condividi la tua pipeline sull'Hub

Per condividere la tua pipeline personalizzata sull'Hub, devi solo salvare il codice della tua sottoclasse `Pipeline` in un file python.
Per esempio, supponiamo di voler utilizzare una pipeline personalizzata per la classificazione delle coppie di frasi come la seguente: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` L'implementazione è agnostica al framework, e lavorerà sia con modelli PyTorch che con TensorFlow. Se l'abbiamo salvato in un file chiamato `pair_classification.py`, può essere successivamente importato e registrato in questo modo: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` Una volta fatto, possiamo usarla con un modello pretrained. L'istanza `sgugger/finetuned-bert-mrpc` è stata fine-tuned sul dataset MRPC, che classifica le coppie di frasi come parafrasi o no. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` Successivamente possiamo condividerlo sull'Hub usando il metodo `push_to_hub` ```py classifier.push_to_hub("test-dynamic-pipeline") ``` Questo codice copierà il file dove è stato definitp `PairClassificationPipeline` all'interno della cartella `"test-dynamic-pipeline"`, insieme al salvataggio del modello e del tokenizer della pipeline, prima di pushare il tutto nel repository `{your_username}/test-dynamic-pipeline`. Dopodiché chiunque potrà utilizzarlo, purché fornisca l'opzione `trust_remote_code=True`: ```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## Aggiungere la pipeline a Transformers Se vuoi contribuire con la tua pipeline a Transformers, dovrai aggiungere un modulo nel sottomodulo `pipelines` con il codice della tua pipeline, quindi aggiungilo all'elenco dei tasks definiti in `pipelines/__init__.py`. Poi hai bisogno di aggiungere i test. Crea un nuovo file `tests/test_pipelines_MY_PIPELINE.py` con esempi ed altri test. La funzione `run_pipeline_test` sarà molto generica e su piccoli modelli casuali su ogni possibile architettura, come definito da `model_mapping` e `tf_model_mapping`. Questo è molto importante per testare la compatibilità futura, nel senso che se qualcuno aggiunge un nuovo modello di `XXXForQuestionAnswering` allora il test della pipeline tenterà di essere eseguito su di esso. 
Poiché i modelli sono casuali, è impossibile controllare i valori effettivi; per questo esiste l'helper `ANY`, che si limita a verificare che l'output della pipeline sia del TYPE atteso. Hai anche *bisogno* di implementare 2 (idealmente 4) test. - `test_small_model_pt` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_tf`. - `test_small_model_tf` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_pt`. - `test_large_model_pt` (`optional`): Testare la pipeline su un modello reale, in cui i risultati dovrebbero avere senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future. - `test_large_model_tf` (`optional`): Testare la pipeline su un modello reale, in cui i risultati dovrebbero avere senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future.
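A puro titolo illustrativo, ecco come potrebbe presentarsi uno scheletro di `test_small_model_pt` per l'ipotetica pipeline `pair-classification` vista sopra (il checkpoint di test è solo un esempio: serve un piccolo modello casuale, e la pipeline deve essere già registrata tramite `PIPELINE_REGISTRY`):

```py
import unittest

from transformers import pipeline


class PairClassificationPipelineTests(unittest.TestCase):
    def test_small_model_pt(self):
        # Checkpoint di esempio: un qualsiasi modello tiny casuale va bene,
        # i valori numerici prodotti non hanno importanza.
        classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert")
        outputs = classifier("I like pizza", second_text="Pizza is what I like")
        # Con un modello casuale non si controllano i valori, solo il TYPE dell'output.
        self.assertIsInstance(outputs, dict)
        self.assertIsInstance(outputs["label"], str)
        self.assertIsInstance(outputs["score"], float)
        self.assertIsInstance(outputs["logits"], list)
```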
transformers/docs/source/it/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/it/add_new_pipeline.md", "repo_id": "transformers", "token_count": 4072 }
361
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Inferenza efficiente su GPU singola Questo documento sarà presto completato con informazioni su come effettuare l'inferenza su una singola GPU. Nel frattempo è possibile consultare [la guida per l'addestramento su una singola GPU](perf_train_gpu_one) e [la guida per l'inferenza su CPU](perf_infer_cpu). ## `BetterTransformer` per l'inferenza più veloce Abbiamo recentemente integrato `BetterTransformer` per velocizzare l'inferenza su GPU per modelli di testo, immagini e audio. Per maggiori dettagli, consultare la documentazione su questa integrazione [qui](https://huggingface.co/docs/optimum/bettertransformer/overview). ## Integrazione di `bitsandbytes` per Int8 mixed-precision matrix decomposition <Tip> Nota che questa funzione può essere utilizzata anche nelle configurazioni multi GPU. </Tip> Dal paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://huggingface.co/papers/2208.07339), noi supportiamo l'integrazione di Hugging Face per tutti i modelli dell'Hub con poche righe di codice. Il metodo `nn.Linear` riduce la dimensione di 2 per i pesi `float16` e `bfloat16` e di 4 per i pesi `float32`, con un impatto quasi nullo sulla qualità, operando sugli outlier in half-precision. ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) Il metodo Int8 mixed-precision matrix decomposition funziona separando la moltiplicazione tra matrici in due flussi: (1) un flusso di outlier sistematici delle feature moltiplicato in fp16, (2) un flusso regolare di moltiplicazione di matrici int8 (99,9%). Con questo metodo, è possibile effettuare inferenza int8 per modelli molto grandi senza degrado predittivo. Per maggiori dettagli sul metodo, consultare il [paper](https://huggingface.co/papers/2208.07339) o il nostro [blogpost sull'integrazione](https://huggingface.co/blog/hf-bitsandbytes-integration). ![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) Nota che è necessaria una GPU per eseguire modelli di tipo mixed-8bit, poiché i kernel sono stati compilati solo per le GPU. Prima di utilizzare questa funzione, assicurarsi di disporre di memoria sufficiente sulla GPU per memorizzare un quarto del modello (o la metà se i pesi del modello sono in mezza precisione). Di seguito sono riportate alcune note per aiutarvi a utilizzare questo modulo, oppure seguite le dimostrazioni su [Google colab](#colab-demos). ### Requisiti - Se si dispone di `bitsandbytes<0.37.0`, assicurarsi di eseguire su GPU NVIDIA che supportano tensor cores a 8 bit (Turing, Ampere o architetture più recenti - ad esempio T4, RTX20s, RTX30s, A40-A100). Per `bitsandbytes>=0.37.0`, tutte le GPU dovrebbero essere supportate. 
- Installare la versione corretta di `bitsandbytes` eseguendo: `pip install bitsandbytes>=0.31.5`. - Installare `accelerate` eseguendo: `pip install accelerate>=0.12.0` ### Esecuzione di modelli mixed-Int8 - configurazione per singola GPU Dopo aver installato le librerie necessarie, puoi caricare il tuo modello mixed 8-bit come segue: ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` Per la generazione di testo, si consiglia di: * utilizzare il metodo `generate()` del modello invece della funzione `pipeline()`. Sebbene l'inferenza sia possibile con la funzione `pipeline()`, essa non è ottimizzata per i modelli mixed-8bit e sarà più lenta rispetto all'uso del metodo `generate()`. Inoltre, alcune strategie di campionamento, come il nucleus sampling, non sono supportate dalla funzione `pipeline()` per i modelli mixed-8bit. * collocare tutti gli input sullo stesso dispositivo del modello. Ecco un semplice esempio: ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_name = "bigscience/bloom-2b5" tokenizer = AutoTokenizer.from_pretrained(model_name) model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) text = "Hello, my llama is cute" inputs = tokenizer(text, return_tensors="pt").to("cuda") generated_ids = model_8bit.generate(**inputs) outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` ### Esecuzione di modelli mixed-8bit - configurazione multi GPU Per caricare il modello mixed-8bit su più GPU si usa lo stesso comando della configurazione a GPU singola: ```py model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` Puoi controllare la quantità di memoria GPU da allocare su ogni GPU usando `accelerate`. Utilizzare l'argomento `max_memory` come segue: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} model_name = "bigscience/bloom-3b" model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True), max_memory=max_memory_mapping ) ``` In questo esempio, la prima GPU utilizzerà 1 GB di memoria e la seconda 2 GB. ### Colab demos Con questo metodo è possibile fare inferenza su modelli che prima non era possibile eseguire su Google Colab. Guardate la demo per l'esecuzione di T5-11b (42GB in fp32)! Utilizzando la quantizzazione a 8 bit su Google Colab: [![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) Oppure questa demo di BLOOM-3B: [![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)
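Come verifica rapida (esempio puramente indicativo, non parte delle demo originali), è possibile confrontare l'occupazione di memoria del modello quantizzato con quella del modello in precisione nativa usando `get_memory_footprint()`:

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_name = "bigscience/bloom-2b5"

# Attenzione: questo esempio carica il modello due volte, quindi richiede memoria sufficiente
model_native = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
model_8bit = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_8bit=True)
)

# get_memory_footprint() restituisce la memoria occupata dai parametri del modello, in byte
print(model_native.get_memory_footprint() / model_8bit.get_memory_footprint())
```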
transformers/docs/source/it/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/it/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 2333 }
362
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ このファイルはMarkdown形式ですが、ドキュメンテーションビルダー用の特定の構文を含んでおり、Markdownビューアーでは正しく表示されないことに注意してください。 --> # Attention mechanism ほとんどのTransformerモデルは、アテンション行列が正方形であるという意味で完全なアテンションを使用します。 これは、長いテキストを扱う場合に計算のボトルネックとなることがあります。LongformerやReformerは、より効率的でトレーニングを高速化するためにアテンション行列のスパースバージョンを使用しようとするモデルです。 ## LSH attention [Reformer](model_doc/reformer)はLSH(局所的に散在ハッシュ)アテンションを使用します。 ソフトマックス(QK^t)では、行列QK^tの中で(ソフトマックス次元で)最も大きな要素のみが有用な寄与を提供します。 したがって、各クエリqについて、クエリqに近いキーkのみを考慮できます。 qとkが近いかどうかを決定するために、ハッシュ関数が使用されます。 アテンションマスクは変更され、現在のトークンをマスク化します(最初の位置を除く)。 なぜなら、それはクエリとキーが等しい(つまり非常に似ている)クエリとキーを提供するからです。 ハッシュは多少ランダムかもしれないため、実際にはいくつかのハッシュ関数が使用され(n_roundsパラメータで決定されます)、それらが平均化されます。 ## Local attention [Longformer](model_doc/longformer)はローカルアテンションを使用します。 しばしば、ローカルコンテキスト(例:左右の2つのトークンは何ですか?)は、特定のトークンに対して行動を起こすのに十分です。 また、小さなウィンドウを持つアテンションレイヤーを積み重ねることで、最後のレイヤーはウィンドウ内のトークンだけでなく、ウィンドウ内のトークンを超えて受容野を持つようになり、文全体の表現を構築できます。 一部の事前選択された入力トークンにはグローバルアテンションも与えられます。 これらの少数のトークンに対して、アテンション行列はすべてのトークンにアクセスでき、このプロセスは対称的です。 他のすべてのトークンは、これらの特定のトークンにアクセスできます(ローカルウィンドウ内のトークンに加えて)。 これは、論文の図2dに示されており、以下はサンプルのアテンションマスクです: <div class="flex justify-center"> <img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/> </div> ## Other tricks ### Axial positional encodings [Reformer](model_doc/reformer)は軸方向の位置エンコーディングを使用しています。伝統的なトランスフォーマーモデルでは、位置エンコーディングEはサイズが \\(l\\) × \\(d\\) の行列で、\\(l\\) はシーケンスの長さ、\\(d\\) は隠れ状態の次元です。非常に長いテキストを扱う場合、この行列は非常に大きく、GPU上で大量のスペースを占有します。これを緩和するために、軸方向の位置エンコーディングは、この大きな行列Eを2つの小さな行列E1とE2に分解します。それぞれの行列はサイズ \\(l_{1} \times d_{1}\\) および \\(l_{2} \times d_{2}\\) を持ち、 \\(l_{1} \times l_{2} = l\\) および \\(d_{1} + d_{2} = d\\) という条件を満たします(長さの積を考えると、これがはるかに小さくなります)。行列E内の時刻 \\(j\\) の埋め込みは、E1内の時刻 \\(j \% l1\\) の埋め込みとE2内の時刻 \\(j // l1\\) の埋め込みを連結することによって得られます。
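以下は、軸方向の位置エンコーディングの考え方を形だけ確かめるための簡単なスケッチです(Reformer の実際の実装ではなく、テンソルの形状の対応関係を示すための仮の例で、寸法の値も仮のものです)。

```python
import torch

# 仮の設定: l = l1 * l2, d = d1 + d2
l1, l2 = 128, 512   # シーケンス長 l = 65536
d1, d2 = 64, 192    # 隠れ状態の次元 d = 256

E1 = torch.randn(l1, d1)  # 小さな位置埋め込み行列 E1
E2 = torch.randn(l2, d2)  # 小さな位置埋め込み行列 E2


def axial_position_embedding(j):
    # 時刻 j の埋め込みは、E1 の行 (j % l1) と E2 の行 (j // l1) を連結したもの
    return torch.cat([E1[j % l1], E2[j // l1]], dim=-1)


print(axial_position_embedding(50000).shape)  # torch.Size([256])
```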
transformers/docs/source/ja/attention.md/0
{ "file_path": "transformers/docs/source/ja/attention.md", "repo_id": "transformers", "token_count": 1963 }
363
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 発電用ユーティリティ このページには、[`~generation.GenerationMixin.generate`] で使用されるすべてのユーティリティ関数がリストされています。 ## 出力を生成する [`~generation.GenerationMixin.generate`] の出力は、次のサブクラスのインスタンスです。 [`~utils.ModelOutput`]。この出力は、返されたすべての情報を含むデータ構造です。 [`~generation.GenerationMixin.generate`] によって作成されますが、タプルまたは辞書としても使用できます。 以下に例を示します。 ```python from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt") generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) ``` `generation_output` オブジェクトは、できる限り [`~generation.GenerateDecoderOnlyOutput`] です。 以下のそのクラスのドキュメントを参照してください。これは、次の属性があることを意味します。 - `sequences`: 生成されたトークンのシーケンス - `scores` (オプション): 各生成ステップの言語モデリング ヘッドの予測スコア - `hidden_​​states` (オプション): 生成ステップごとのモデルの隠れた状態 - `attentions` (オプション): 生成ステップごとのモデルのアテンションの重み ここでは、`output_scores=True`を渡したので `scores` がありますが、`hidden_​​states` はありません。 `attentions` は、`output_hidden_​​states=True`または`output_attentions=True`を渡さなかったためです。 通常と同じように各属性にアクセスできます。その属性がモデルから返されなかった場合は、 は「なし」を取得します。ここで、たとえば`generation_output.scores`は、生成されたすべての予測スコアです。 言語モデリングのヘッドであり、`generation_output.attentions`は`None`です。 `generation_output` オブジェクトをタプルとして使用する場合、`None` 値を持たない属性のみが保持されます。 たとえば、ここには 2 つの要素、`loss`、次に`logits`があります。 ```python generation_output[:2] ``` たとえば、タプル `(generation_output.sequences,generation_output.scores)` を返します。 `generation_output` オブジェクトを辞書として使用する場合、`None` を持たない属性のみが保持されます。 ここでは、たとえば、`sequences`と`scores`という 2 つのキーがあります。 ここではすべての出力タイプを文書化します。 ### PyTorch [[autodoc]] generation.GenerateDecoderOnlyOutput [[autodoc]] generation.GenerateEncoderDecoderOutput [[autodoc]] generation.GenerateBeamDecoderOnlyOutput [[autodoc]] generation.GenerateBeamEncoderDecoderOutput ### TensorFlow [[autodoc]] generation.TFGreedySearchEncoderDecoderOutput [[autodoc]] generation.TFGreedySearchDecoderOnlyOutput [[autodoc]] generation.TFSampleEncoderDecoderOutput [[autodoc]] generation.TFSampleDecoderOnlyOutput [[autodoc]] generation.TFBeamSearchEncoderDecoderOutput [[autodoc]] generation.TFBeamSearchDecoderOnlyOutput [[autodoc]] generation.TFBeamSampleEncoderDecoderOutput [[autodoc]] generation.TFBeamSampleDecoderOnlyOutput [[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput [[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput ### FLAX [[autodoc]] generation.FlaxSampleOutput [[autodoc]] generation.FlaxGreedySearchOutput [[autodoc]] generation.FlaxBeamSearchOutput ## LogitsProcessor [`LogitsProcessor`] を使用して、言語モデルのヘッドの予測スコアを変更できます。 世代。 ### PyTorch [[autodoc]] AlternatingCodebooksLogitsProcessor - __call__ [[autodoc]] ClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] 
EncoderNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] EncoderRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] EpsilonLogitsWarper - __call__ [[autodoc]] EtaLogitsWarper - __call__ [[autodoc]] ExponentialDecayLengthPenalty - __call__ [[autodoc]] ForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] ForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] HammingDiversityLogitsProcessor - __call__ [[autodoc]] InfNanRemoveLogitsProcessor - __call__ [[autodoc]] LogitNormalization - __call__ [[autodoc]] LogitsProcessor - __call__ [[autodoc]] LogitsProcessorList - __call__ [[autodoc]] MinLengthLogitsProcessor - __call__ [[autodoc]] MinNewTokensLengthLogitsProcessor - __call__ [[autodoc]] NoBadWordsLogitsProcessor - __call__ [[autodoc]] NoRepeatNGramLogitsProcessor - __call__ [[autodoc]] PrefixConstrainedLogitsProcessor - __call__ [[autodoc]] RepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] SequenceBiasLogitsProcessor - __call__ [[autodoc]] SuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] SuppressTokensLogitsProcessor - __call__ [[autodoc]] TemperatureLogitsWarper - __call__ [[autodoc]] TopKLogitsWarper - __call__ [[autodoc]] TopPLogitsWarper - __call__ [[autodoc]] TypicalLogitsWarper - __call__ [[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] WhisperTimeStampLogitsProcessor - __call__ ### TensorFlow [[autodoc]] TFForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] TFForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] TFForceTokensLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessorList - __call__ [[autodoc]] TFLogitsWarper - __call__ [[autodoc]] TFMinLengthLogitsProcessor - __call__ [[autodoc]] TFNoBadWordsLogitsProcessor - __call__ [[autodoc]] TFNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] TFRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensLogitsProcessor - __call__ [[autodoc]] TFTemperatureLogitsWarper - __call__ [[autodoc]] TFTopKLogitsWarper - __call__ [[autodoc]] TFTopPLogitsWarper - __call__ ### FLAX [[autodoc]] FlaxForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForceTokensLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessorList - __call__ [[autodoc]] FlaxLogitsWarper - __call__ [[autodoc]] FlaxMinLengthLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensLogitsProcessor - __call__ [[autodoc]] FlaxTemperatureLogitsWarper - __call__ [[autodoc]] FlaxTopKLogitsWarper - __call__ [[autodoc]] FlaxTopPLogitsWarper - __call__ [[autodoc]] FlaxWhisperTimeStampLogitsProcessor - __call__ ## StoppingCriteria [`StoppingCriteria`] を使用して、(EOS トークン以外の) 生成を停止するタイミングを変更できます。これは PyTorch 実装でのみ利用可能であることに注意してください。 [[autodoc]] StoppingCriteria - __call__ [[autodoc]] StoppingCriteriaList - __call__ [[autodoc]] MaxLengthCriteria - __call__ [[autodoc]] MaxTimeCriteria - __call__ ## Constraints [`Constraint`] を使用すると、生成時に出力に特定のトークンまたはシーケンスが含まれるように強制できます。これは PyTorch 実装でのみ利用可能であることに注意してください。 [[autodoc]] Constraint [[autodoc]] PhrasalConstraint [[autodoc]] DisjunctiveConstraint [[autodoc]] ConstraintListState ## BeamSearch [[autodoc]] BeamScorer - process - finalize [[autodoc]] BeamSearchScorer - process - finalize [[autodoc]] ConstrainedBeamSearchScorer - process - finalize ## Streamers [[autodoc]] TextStreamer [[autodoc]] TextIteratorStreamer
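参考までに、[`TextStreamer`] を使って生成されたトークンを逐次出力する簡単な例を示します(使用イメージを示すためのスケッチであり、出力内容はモデルに依存します)。

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
streamer = TextStreamer(tokenizer, skip_prompt=True)

# 生成されたテキストがトークンごとに標準出力へ書き出されます
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
```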
transformers/docs/source/ja/internal/generation_utils.md/0
{ "file_path": "transformers/docs/source/ja/internal/generation_utils.md", "repo_id": "transformers", "token_count": 3478 }
364
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Models ベースクラスである [`PreTrainedModel`]、[`TFPreTrainedModel`]、[`FlaxPreTrainedModel`] は、モデルの読み込みと保存に関する共通のメソッドを実装しており、これはローカルのファイルやディレクトリから、またはライブラリが提供する事前学習モデル構成(HuggingFaceのAWS S3リポジトリからダウンロード)からモデルを読み込むために使用できます。 [`PreTrainedModel`] と [`TFPreTrainedModel`] は、次の共通のメソッドも実装しています: - 語彙に新しいトークンが追加された場合に、入力トークン埋め込みのリサイズを行う - モデルのアテンションヘッドを刈り込む 各モデルに共通するその他のメソッドは、[`~modeling_utils.ModuleUtilsMixin`](PyTorchモデル用)および[`~modeling_tf_utils.TFModuleUtilsMixin`](TensorFlowモデル用)で定義されており、テキスト生成の場合、[`~generation.GenerationMixin`](PyTorchモデル用)、[`~generation.TFGenerationMixin`](TensorFlowモデル用)、および[`~generation.FlaxGenerationMixin`](Flax/JAXモデル用)もあります。 ## PreTrainedModel [[autodoc]] PreTrainedModel - push_to_hub - all <a id='from_pretrained-torch-dtype'></a> ### 大規模モデルの読み込み Transformers 4.20.0では、[`~PreTrainedModel.from_pretrained`] メソッドが再設計され、[Accelerate](https://huggingface.co/docs/accelerate/big_modeling) を使用して大規模モデルを扱うことが可能になりました。これには Accelerate >= 0.9.0 と PyTorch >= 1.9.0 が必要です。以前の方法でフルモデルを作成し、その後事前学習の重みを読み込む代わりに(これにはメモリ内のモデルサイズが2倍必要で、ランダムに初期化されたモデル用と重み用の2つが必要でした)、モデルを空の外殻として作成し、事前学習の重みが読み込まれるときにパラメーターを実体化するオプションが追加されました。 さらに、モデルが完全にRAMに収まらない場合(現時点では推論のみ有効)、異なるデバイスにモデルを直接配置できます。`device_map="auto"` を使用すると、Accelerateは各レイヤーをどのデバイスに配置するかを決定し、最速のデバイス(GPU)を最大限に活用し、残りの部分をCPU、あるいはGPU RAMが不足している場合はハードドライブにオフロードします。モデルが複数のデバイスに分割されていても、通常どおり実行されます。 ```py from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` モデルがデバイス間でどのように分割されたかは、その `hf_device_map` 属性を見ることで確認できます: ```py t0pp.hf_device_map ``` ```python out {'shared': 0, 'decoder.embed_tokens': 0, 'encoder': 0, 'decoder.block.0': 0, 'decoder.block.1': 1, 'decoder.block.2': 1, 'decoder.block.3': 1, 'decoder.block.4': 1, 'decoder.block.5': 1, 'decoder.block.6': 1, 'decoder.block.7': 1, 'decoder.block.8': 1, 'decoder.block.9': 1, 'decoder.block.10': 1, 'decoder.block.11': 1, 'decoder.block.12': 1, 'decoder.block.13': 1, 'decoder.block.14': 1, 'decoder.block.15': 1, 'decoder.block.16': 1, 'decoder.block.17': 1, 'decoder.block.18': 1, 'decoder.block.19': 1, 'decoder.block.20': 1, 'decoder.block.21': 1, 'decoder.block.22': 'cpu', 'decoder.block.23': 'cpu', 'decoder.final_layer_norm': 'cpu', 'decoder.dropout': 'cpu', 'lm_head': 'cpu'} ``` 同じフォーマットに従って、独自のデバイスマップを作成することもできます(レイヤー名からデバイスへの辞書です)。モデルのすべてのパラメータを指定されたデバイスにマップする必要がありますが、1つのレイヤーが完全に同じデバイスにある場合、そのレイヤーのサブモジュールのすべてがどこに行くかの詳細を示す必要はありません。例えば、次のデバイスマップはT0ppに適しています(GPUメモリがある場合): ```python device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} ``` モデルのメモリへの影響を最小限に抑えるもう 1 つの方法は、低精度の dtype (`torch.float16` など) でモデルをインスタンス化するか、以下で説明する直接量子化手法を使用することです。 ### Model Instantiation dtype Pytorch では、モデルは通常 `torch.float32` 形式でインスタンス化されます。これは、しようとすると問題になる可能性があります 重みが fp16 にあるモデルをロードすると、2 
倍のメモリが必要になるためです。この制限を克服するには、次のことができます。 `dtype` 引数を使用して、目的の `dtype` を明示的に渡します。 ```python model = T5ForConditionalGeneration.from_pretrained("t5", dtype=torch.float16) ``` または、モデルを常に最適なメモリ パターンでロードしたい場合は、特別な値 `"auto"` を使用できます。 そして、`dtype` はモデルの重みから自動的に導出されます。 ```python model = T5ForConditionalGeneration.from_pretrained("t5", dtype="auto") ``` スクラッチからインスタンス化されたモデルには、どの `dtype` を使用するかを指示することもできます。 ```python config = T5Config.from_pretrained("t5") model = AutoModel.from_config(config) ``` Pytorch の設計により、この機能は浮動小数点 dtype でのみ使用できます。 ## ModuleUtilsMixin [[autodoc]] modeling_utils.ModuleUtilsMixin ## TFPreTrainedModel [[autodoc]] TFPreTrainedModel - push_to_hub - all ## TFModelUtilsMixin [[autodoc]] modeling_tf_utils.TFModelUtilsMixin ## FlaxPreTrainedModel [[autodoc]] FlaxPreTrainedModel - push_to_hub - all ## Pushing to the Hub [[autodoc]] utils.PushToHubMixin ## Sharded checkpoints [[autodoc]] modeling_utils.load_sharded_checkpoint
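以下は、チェックポイントをシャード化して保存し、[`~modeling_utils.load_sharded_checkpoint`] で読み込む流れを示す簡単なスケッチです(保存先のパスや `max_shard_size` の値は説明用に仮に選んだものです)。

```python
from transformers import AutoModel
from transformers.modeling_utils import load_sharded_checkpoint

model = AutoModel.from_pretrained("google-bert/bert-base-cased")

# max_shard_size を指定すると、重みが複数のファイルに分割して保存されます
model.save_pretrained("./sharded-bert", max_shard_size="200MB")

# 既にインスタンス化済みのモデルに、シャード化されたチェックポイントを読み込みます
load_sharded_checkpoint(model, "./sharded-bert")
```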
transformers/docs/source/ja/main_classes/model.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/model.md", "repo_id": "transformers", "token_count": 2974 }
365
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Bark ## Overview Bark は、[suno-ai/bark](https://github.com/suno-ai/bark) で Suno AI によって提案されたトランスフォーマーベースのテキスト読み上げモデルです。 Bark は 4 つの主要なモデルで構成されています。 - [`BarkSemanticModel`] ('テキスト'モデルとも呼ばれる): トークン化されたテキストを入力として受け取り、テキストの意味を捉えるセマンティック テキスト トークンを予測する因果的自己回帰変換モデル。 - [`BarkCoarseModel`] ('粗い音響' モデルとも呼ばれる): [`BarkSemanticModel`] モデルの結果を入力として受け取る因果的自己回帰変換器。 EnCodec に必要な最初の 2 つのオーディオ コードブックを予測することを目的としています。 - [`BarkFineModel`] ('微細音響' モデル)、今回は非因果的オートエンコーダー トランスフォーマーで、以前のコードブック埋め込みの合計に基づいて最後のコードブックを繰り返し予測します。 - [`EncodecModel`] からすべてのコードブック チャネルを予測したので、Bark はそれを使用して出力オーディオ配列をデコードします。 最初の 3 つのモジュールはそれぞれ、特定の事前定義された音声に従って出力サウンドを調整するための条件付きスピーカー埋め込みをサポートできることに注意してください。 ### Optimizing Bark Bark は、コードを数行追加するだけで最適化でき、**メモリ フットプリントが大幅に削減**され、**推論が高速化**されます。 #### Using half-precision モデルを半精度でロードするだけで、推論を高速化し、メモリ使用量を 50% 削減できます。 ```python from transformers import BarkModel import torch device = "cuda" if torch.cuda.is_available() else "cpu" model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16).to(device) ``` #### Using 🤗 Better Transformer Better Transformer は、内部でカーネル融合を実行する 🤗 最適な機能です。パフォーマンスを低下させることなく、速度を 20% ~ 30% 向上させることができます。モデルを 🤗 Better Transformer にエクスポートするのに必要なコードは 1 行だけです。 ```python model = model.to_bettertransformer() ``` この機能を使用する前に 🤗 Optimum をインストールする必要があることに注意してください。 [インストール方法はこちら](https://huggingface.co/docs/optimum/installation) #### Using CPU offload 前述したように、Bark は 4 つのサブモデルで構成されており、オーディオ生成中に順番に呼び出されます。言い換えれば、1 つのサブモデルが使用されている間、他のサブモデルはアイドル状態になります。 CUDA デバイスを使用している場合、メモリ フットプリントの 80% 削減による恩恵を受ける簡単な解決策は、アイドル状態の GPU のサブモデルをオフロードすることです。この操作は CPU オフロードと呼ばれます。 1行のコードで使用できます。 ```python model.enable_cpu_offload() ``` この機能を使用する前に、🤗 Accelerate をインストールする必要があることに注意してください。 [インストール方法はこちら](https://huggingface.co/docs/accelerate/basic_tutorials/install) #### Combining optimization techniques 最適化手法を組み合わせて、CPU オフロード、半精度、🤗 Better Transformer をすべて一度に使用できます。 ```python from transformers import BarkModel import torch device = "cuda" if torch.cuda.is_available() else "cpu" # load in fp16 model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16).to(device) # convert to bettertransformer model = BetterTransformer.transform(model, keep_original_model=False) # enable CPU offload model.enable_cpu_offload() ``` 推論最適化手法の詳細については、[こちら](https://huggingface.co/docs/transformers/perf_infer_gpu_one) をご覧ください。 ### Tips Suno は、多くの言語で音声プリセットのライブラリを提供しています [こちら](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)。 これらのプリセットは、ハブ [こちら](https://huggingface.co/suno/bark-small/tree/main/speaker_embeddings) または [こちら](https://huggingface.co/suno/bark/tree/main/speaker_embeddings)。 ```python >>> from transformers import AutoProcessor, BarkModel >>> processor = AutoProcessor.from_pretrained("suno/bark") >>> model = BarkModel.from_pretrained("suno/bark") >>> voice_preset = "v2/en_speaker_6" >>> inputs = processor("Hello, my dog is cute", voice_preset=voice_preset) >>> audio_array = 
model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` Bark は、非常にリアルな **多言語** 音声だけでなく、音楽、背景ノイズ、単純な効果音などの他の音声も生成できます。 ```python >>> # Multilingual speech - simplified Chinese >>> inputs = processor("惊人的!我会说中文") >>> # Multilingual speech - French - let's use a voice_preset as well >>> inputs = processor("Incroyable! Je peux générer du son.", voice_preset="fr_speaker_5") >>> # Bark can also generate music. You can help it out by adding music notes around your lyrics. >>> inputs = processor("♪ Hello, my dog is cute ♪") >>> audio_array = model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` このモデルは、笑う、ため息、泣くなどの**非言語コミュニケーション**を生成することもできます。 ```python >>> # Adding non-speech cues to the input text >>> inputs = processor("Hello uh ... [clears throat], my dog is cute [laughter]") >>> audio_array = model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` オーディオを保存するには、モデル設定と scipy ユーティリティからサンプル レートを取得するだけです。 ```python >>> from scipy.io.wavfile import write as write_wav >>> # save audio to disk, but first take the sample rate from the model config >>> sample_rate = model.generation_config.sample_rate >>> write_wav("bark_generation.wav", sample_rate, audio_array) ``` このモデルは、[Yoach Lacombe (ylacombe)](https://huggingface.co/ylacombe) および [Sanchit Gandhi (sanchit-gandhi)](https://github.com/sanchit-gandhi) によって提供されました。 元のコードは [ここ](https://github.com/suno-ai/bark) にあります。 ## BarkConfig [[autodoc]] BarkConfig - all ## BarkProcessor [[autodoc]] BarkProcessor - all - __call__ ## BarkModel [[autodoc]] BarkModel - generate - enable_cpu_offload ## BarkSemanticModel [[autodoc]] BarkSemanticModel - forward ## BarkCoarseModel [[autodoc]] BarkCoarseModel - forward ## BarkFineModel [[autodoc]] BarkFineModel - forward ## BarkCausalModel [[autodoc]] BarkCausalModel - forward ## BarkCoarseConfig [[autodoc]] BarkCoarseConfig - all ## BarkFineConfig [[autodoc]] BarkFineConfig - all ## BarkSemanticConfig [[autodoc]] BarkSemanticConfig - all
transformers/docs/source/ja/model_doc/bark.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bark.md", "repo_id": "transformers", "token_count": 3177 }
366
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BLIP ## Overview BLIP モデルは、[BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://huggingface.co/papers/2201.12086) で Junnan Li、Dongxu Li、Caiming Xiong、Steven Hoi によって提案されました。 。 BLIP は、次のようなさまざまなマルチモーダル タスクを実行できるモデルです。 - 視覚的な質問応答 - 画像とテキストの検索(画像とテキストのマッチング) - 画像キャプション 論文の要約は次のとおりです。 *視覚言語事前トレーニング (VLP) により、多くの視覚言語タスクのパフォーマンスが向上しました。 ただし、既存の事前トレーニング済みモデルのほとんどは、理解ベースのタスクまたは世代ベースのタスクのいずれかでのみ優れています。さらに、最適ではない監視ソースである Web から収集されたノイズの多い画像とテキストのペアを使用してデータセットをスケールアップすることで、パフォーマンスの向上が大幅に達成されました。この論文では、視覚言語の理解と生成タスクの両方に柔軟に移行する新しい VLP フレームワークである BLIP を提案します。 BLIP は、キャプションをブートストラップすることでノイズの多い Web データを効果的に利用します。キャプショナーが合成キャプションを生成し、フィルターがノイズの多いキャプションを除去します。画像テキスト検索 (平均再現率 +2.7%@1)、画像キャプション作成 (CIDEr で +2.8%)、VQA ( VQA スコアは +1.6%)。 BLIP は、ゼロショット方式でビデオ言語タスクに直接転送した場合にも、強力な一般化能力を発揮します。コード、モデル、データセットがリリースされています。* ![BLIP.gif](https://cdn-uploads.huggingface.co/production/uploads/1670928184033-62441d1d9fdefb55a0b7d12c.gif) このモデルは [ybelkada](https://huggingface.co/ybelkada) によって提供されました。 元のコードは [ここ](https://github.com/salesforce/BLIP) にあります。 ## Resources - [Jupyter ノートブック](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb) カスタム データセットの画像キャプション用に BLIP を微調整する方法 ## BlipConfig [[autodoc]] BlipConfig - from_text_vision_configs ## BlipTextConfig [[autodoc]] BlipTextConfig ## BlipVisionConfig [[autodoc]] BlipVisionConfig ## BlipProcessor [[autodoc]] BlipProcessor ## BlipImageProcessor [[autodoc]] BlipImageProcessor - preprocess ## BlipImageProcessorFast [[autodoc]] BlipImageProcessorFast - preprocess <frameworkcontent> <pt> ## BlipModel [[autodoc]] BlipModel - forward - get_text_features - get_image_features ## BlipTextModel [[autodoc]] BlipTextModel - forward ## BlipVisionModel [[autodoc]] BlipVisionModel - forward ## BlipForConditionalGeneration [[autodoc]] BlipForConditionalGeneration - forward ## BlipForImageTextRetrieval [[autodoc]] BlipForImageTextRetrieval - forward ## BlipForQuestionAnswering [[autodoc]] BlipForQuestionAnswering - forward </pt> <tf> ## TFBlipModel [[autodoc]] TFBlipModel - call - get_text_features - get_image_features ## TFBlipTextModel [[autodoc]] TFBlipTextModel - call ## TFBlipVisionModel [[autodoc]] TFBlipVisionModel - call ## TFBlipForConditionalGeneration [[autodoc]] TFBlipForConditionalGeneration - call ## TFBlipForImageTextRetrieval [[autodoc]] TFBlipForImageTextRetrieval - call ## TFBlipForQuestionAnswering [[autodoc]] TFBlipForQuestionAnswering - call </tf> </frameworkcontent>
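参考までに、画像キャプション生成での使用イメージを示す簡単な例です(チェックポイント名と画像 URL は説明用に選んだ一例で、実行にはネットワークアクセスが必要です)。

```python
import requests
from PIL import Image

from transformers import BlipForConditionalGeneration, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# テキストを渡さなければ、画像のみを条件としてキャプションを生成します
inputs = processor(images=image, return_tensors="pt")
generated_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```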
transformers/docs/source/ja/model_doc/blip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/blip.md", "repo_id": "transformers", "token_count": 1815 }
367
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvBERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=convbert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/conv-bert-base"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview ConvBERT モデルは、[ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://huggingface.co/papers/2008.02496) で Zihang Jiang、Weihao Yu、Daquan Zhou、Yunpeng Chen、Jiashi Feng、Shuicheng Yan によって提案されました。 やん。 論文の要約は次のとおりです。 *BERT やそのバリアントなどの事前トレーニング済み言語モデルは、最近、さまざまな環境で目覚ましいパフォーマンスを達成しています。 自然言語理解タスク。ただし、BERT はグローバルな自己注意ブロックに大きく依存しているため、問題が発生します。 メモリ使用量と計算コストが大きくなります。すべての注意が入力シーケンス全体に対してクエリを実行しますが、 グローバルな観点からアテンション マップを生成すると、一部のヘッドはローカルな依存関係のみを学習する必要があることがわかります。 これは、計算の冗長性が存在することを意味します。したがって、我々は、新しいスパンベースの動的畳み込みを提案します。 これらのセルフアテンション ヘッドを置き換えて、ローカルの依存関係を直接モデル化します。新しいコンボリューションヘッドと、 自己注意の頭を休め、グローバルとローカルの両方の状況でより効率的な新しい混合注意ブロックを形成します 学ぶ。この混合注意設計を BERT に装備し、ConvBERT モデルを構築します。実験でわかったことは、 ConvBERT は、トレーニング コストが低く、さまざまな下流タスクにおいて BERT およびその亜種よりも大幅に優れたパフォーマンスを発揮します。 モデルパラメータが少なくなります。注目すべきことに、ConvBERTbase モデルは 86.4 GLUE スコアを達成し、ELECTRAbase よりも 0.7 高いのに対し、 トレーニングコストは 1/4 未満です。コードと事前トレーニングされたモデルがリリースされます。* このモデルは、[abhishek](https://huggingface.co/abhishek) によって提供されました。オリジナルの実装が見つかります ここ: https://github.com/yitu-opensource/ConvBert ## Usage tips ConvBERT トレーニングのヒントは BERT のヒントと似ています。使用上のヒントについては、[BERT ドキュメント](bert) を参照してください。 ## Resources - [テキスト分類タスクガイド(英語版)](../../en/tasks/sequence_classification) - [トークン分類タスクガイド](../tasks/token_classification) - [質問回答タスク ガイド](../tasks/question_answering) - [マスクされた言語モデリング タスク ガイド](../tasks/masked_lang_modeling) - [多肢選択タスク ガイド](../tasks/multiple_choice) ## ConvBertConfig [[autodoc]] ConvBertConfig ## ConvBertTokenizer [[autodoc]] ConvBertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## ConvBertTokenizerFast [[autodoc]] ConvBertTokenizerFast <frameworkcontent> <pt> ## ConvBertModel [[autodoc]] ConvBertModel - forward ## ConvBertForMaskedLM [[autodoc]] ConvBertForMaskedLM - forward ## ConvBertForSequenceClassification [[autodoc]] ConvBertForSequenceClassification - forward ## ConvBertForMultipleChoice [[autodoc]] ConvBertForMultipleChoice - forward ## ConvBertForTokenClassification [[autodoc]] ConvBertForTokenClassification - forward ## ConvBertForQuestionAnswering [[autodoc]] ConvBertForQuestionAnswering - forward </pt> <tf> ## TFConvBertModel [[autodoc]] TFConvBertModel - call ## TFConvBertForMaskedLM [[autodoc]] TFConvBertForMaskedLM - call ## TFConvBertForSequenceClassification [[autodoc]] TFConvBertForSequenceClassification - call ## TFConvBertForMultipleChoice 
[[autodoc]] TFConvBertForMultipleChoice - call ## TFConvBertForTokenClassification [[autodoc]] TFConvBertForTokenClassification - call ## TFConvBertForQuestionAnswering [[autodoc]] TFConvBertForQuestionAnswering - call </tf> </frameworkcontent>
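参考までに、事前学習済みの ConvBERT チェックポイントから文の隠れ状態を取得する簡単な例です(あくまで使用イメージを示すためのスケッチです)。

```python
import torch

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = AutoModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)
```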
transformers/docs/source/ja/model_doc/convbert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/convbert.md", "repo_id": "transformers", "token_count": 2167 }
368
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DialoGPT ## Overview DialoGPT は、[DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://huggingface.co/papers/1911.00536) で Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.これは、から抽出された 147M 万の会話のようなやりとりでトレーニングされた GPT2 モデルです。 レディット。 論文の要約は次のとおりです。 *私たちは、大規模で調整可能なニューラル会話応答生成モデル DialoGPT (対話生成事前トレーニング済み) を紹介します。 変成器)。 Reddit のコメント チェーンから抽出された 1 億 4,700 万件の会話のようなやり取りを対象にトレーニングされました。 2005 年から 2017 年にかけて、DialoGPT は人間に近いパフォーマンスを達成するために Hugging Face PyTorch トランスフォーマーを拡張しました。 シングルターンダイアログ設定における自動評価と人間による評価の両方。会話システムが DialoGPT を活用すると、強力なベースラインよりも関連性が高く、内容が充実し、コンテキストに一貫性のある応答が生成されます。 システム。神経反応の研究を促進するために、事前トレーニングされたモデルとトレーニング パイプラインが公開されています。 よりインテリジェントなオープンドメイン対話システムの生成と開発。* 元のコードは [ここ](https://github.com/microsoft/DialoGPT) にあります。 ## Usage tips - DialoGPT は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。 左よりも。 - DialoGPT は、会話データの因果言語モデリング (CLM) 目標に基づいてトレーニングされているため、強力です オープンドメイン対話システムにおける応答生成時。 - DialoGPT を使用すると、[DialoGPT's model card](https://huggingface.co/microsoft/DialoGPT-medium) に示されているように、ユーザーはわずか 10 行のコードでチャット ボットを作成できます。 トレーニング: DialoGPT をトレーニングまたは微調整するには、因果言語モデリング トレーニングを使用できます。公式論文を引用すると: *私たちは OpenAI GPT-2に従って、マルチターン対話セッションを長いテキストとしてモデル化し、生成タスクを言語としてフレーム化します モデリング。まず、ダイアログ セッション内のすべてのダイアログ ターンを長いテキスト x_1,..., x_N に連結します (N は * 詳細については、元の論文を参照してください。 <Tip> DialoGPT のアーキテクチャは GPT2 モデルに基づいています。API リファレンスと例については、[GPT2 のドキュメント ページ](openai-community/gpt2) を参照してください。 </Tip>
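参考までに、モデルカードで紹介されているチャットボット例を簡略化したスケッチを示します(対話履歴を連結しながら `generate` を呼び出すだけです)。

```python
import torch

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

chat_history_ids = None
for step in range(3):
    # ユーザー入力の末尾に EOS トークンを付けてエンコードします
    new_user_input_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")

    # これまでの対話履歴に新しい入力を連結します
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # 応答を生成し、新しく生成された部分だけをデコードして表示します
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1] :][0], skip_special_tokens=True)))
```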
transformers/docs/source/ja/model_doc/dialogpt.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/dialogpt.md", "repo_id": "transformers", "token_count": 1577 }
369
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Training on Multiple GPUs 単一のGPUでのトレーニングが遅すぎる場合や、モデルの重みが単一のGPUのメモリに収まらない場合、複数のGPUを使用したセットアップが必要となります。単一のGPUから複数のGPUへの切り替えには、ワークロードを分散するためのある種の並列処理が必要です。データ、テンソル、またはパイプラインの並列処理など、さまざまな並列処理技術があります。ただし、すべてに適した一つの解決策は存在せず、最適な設定は使用するハードウェアに依存します。この記事は、おそらく他のフレームワークにも適用される主要な概念に焦点を当てつつ、PyTorchベースの実装に焦点を当てています。 <Tip> **注意**: [単一GPUセクション](perf_train_gpu_one) で紹介された多くの戦略(混合精度トレーニングや勾配蓄積など)は一般的であり、モデルのトレーニングに一般的に適用されます。したがって、マルチGPUやCPUトレーニングなどの次のセクションに入る前に、それを確認してください。 </Tip> まず、さまざまな1D並列処理技術とその利点および欠点について詳しく説明し、それらを2Dおよび3D並列処理に組み合わせてさらに高速なトレーニングを実現し、より大きなモデルをサポートする方法を検討します。さまざまな他の強力な代替手法も紹介されます。 ## Concepts 以下は、この文書で後で詳しく説明される主要な概念の簡単な説明です。 1. **DataParallel (DP)** - 同じセットアップが複数回複製され、各セットアップにデータのスライスが供給されます。処理は並行して行われ、各セットアップはトレーニングステップの最後に同期されます。 2. **TensorParallel (TP)** - 各テンソルは複数のチャンクに分割され、単一のGPUにテンソル全体が存在するのではなく、テンソルの各シャードが指定されたGPUに存在します。処理中に、各シャードは別々に並行して処理され、異なるGPUで同期され、ステップの最後に結果が同期されます。これは水平並列処理と呼ばれるもので、分割は水平レベルで行われます。 3. **PipelineParallel (PP)** - モデルは垂直(レイヤーレベル)に複数のGPUに分割され、モデルの単一または複数のレイヤーが単一のGPUに配置されます。各GPUはパイプラインの異なるステージを並行して処理し、バッチの小さなチャンクで作業します。 4. **Zero Redundancy Optimizer (ZeRO)** - TPといくらか似たようなテンソルのシャーディングを実行しますが、前向きまたは後向きの計算のためにテンソル全体が再構築されるため、モデルを変更する必要はありません。また、GPUメモリが制限されている場合に補償するためのさまざまなオフロード技術をサポートします。 5. **Sharded DDP** - Sharded DDPは、さまざまなZeRO実装で使用される基本的なZeROコンセプトの別名です。 各コンセプトの詳細に深入りする前に、大規模なインフラストラクチャで大規模なモデルをトレーニングする際の大まかな決定プロセスを見てみましょう。 ## Scalability Strategy **⇨ シングルノード / マルチGPU** * モデルが単一のGPUに収まる場合: 1. DDP - 分散データ並列 2. ZeRO - 状況と使用される構成に応じて速いかどうかが異なります * モデルが単一のGPUに収まらない場合: 1. PP 2. ZeRO 3. TP 非常に高速なノード内接続(NVLINKまたはNVSwitchなど)があれば、これらの3つはほぼ同じ速度になるはずで、これらがない場合、PPはTPまたはZeROよりも速くなります。TPの程度も差を生じるかもしれません。特定のセットアップでの勝者を見つけるために実験することが最善です。 TPはほとんどの場合、単一ノード内で使用されます。つまり、TPサイズ <= ノードごとのGPU数です。 * 最大のレイヤーが単一のGPUに収まらない場合: 1. ZeROを使用しない場合 - TPを使用する必要があります。PP単独では収まらないでしょう。 2. ZeROを使用する場合 - "シングルGPU"のエントリと同じものを参照してください **⇨ マルチノード / マルチGPU** * ノード間の高速接続がある場合: 1. ZeRO - モデルへのほとんどの変更が不要です 2. PP+TP+DP - 通信が少なく、モデルへの大規模な変更が必要です * ノード間の接続が遅く、GPUメモリがまだ不足している場合: 1. DP+PP+TP+ZeRO-1 ## Data Parallelism 2つのGPUを持つほとんどのユーザーは、`DataParallel`(DP)と`DistributedDataParallel`(DDP)によって提供されるトレーニング速度の向上をすでに享受しています。これらはほぼ自明に使用できるPyTorchの組み込み機能です。一般的に、すべてのモデルで動作するDDPを使用することをお勧めします。DPは一部のモデルで失敗する可能性があるためです。[PyTorchのドキュメンテーション](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html)自体もDDPの使用を推奨しています。 ### DP vs DDP `DistributedDataParallel`(DDP)は通常、`DataParallel`(DP)よりも高速ですが、常にそうとは限りません: * DPはPythonスレッドベースですが、DDPはマルチプロセスベースです。そのため、GIL(Global Interpreter Lock)などのPythonスレッドの制約がないためです。 * 一方、GPUカード間の遅い相互接続性は、DDPの場合に実際には遅い結果をもたらす可能性があります。 以下は、2つのモード間のGPU間通信の主な違いです: [DDP](https://pytorch.org/docs/master/notes/ddp.html): - 開始時、メインプロセスはモデルをGPU 0から他のGPUに複製します。 - それから各バッチごとに: 1. 各GPUは各自のミニバッチのデータを直接消費します。 2. 
`backward`中、ローカル勾配が準備できると、それらはすべてのプロセスで平均化されます。 [DP](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html): 各バッチごとに: 1. GPU 0はデータバッチを読み取り、それから各GPUにミニバッチを送信します。 2. GPU 0から各GPUに最新のモデルを複製します。 3. `forward`を実行し、各GPUからGPU 0に出力を送信し、損失を計算します。 4. GPU 0からすべてのGPUに損失を分散し、`backward`を実行します。 5. 各GPUからGPU 0に勾配を送信し、それらを平均化します。 DDPはバッチごとに行う通信は勾配の送信のみであり、一方、DPはバッチごとに5つの異なるデータ交換を行います。 DPはプロセス内でデータをPythonスレッドを介してコピーしますが、DDPは[torch.distributed](https://pytorch.org/docs/master/distributed.html)を介してデータをコピーします。 DPではGPU 0は他のGPUよりもはるかに多くの作業を行うため、GPUの未使用率が高くなります。 DDPは複数のマシン間で使用できますが、DPの場合はそうではありません。 DPとDDPの他にも違いがありますが、この議論には関係ありません。 これら2つのモードを深く理解したい場合、この[記事](https://www.telesens.co/2019/04/04/distributed-data-parallel-training-using-pytorch-on-aws/)を強くお勧めします。素晴らしいダイアグラムを含み、さまざまなハードウェアでの複数のベンチマークとプロファイラの出力を示し、知っておく必要があるすべての微妙なニュアンスを説明しています。 実際のベンチマークを見てみましょう: | Type | NVlink | Time | | :----- | ----- | ---: | | 2:DP | Y | 110s | | 2:DDP | Y | 101s | | 2:DDP | N | 131s | 解析: ここで、DPはNVlinkを使用したDDPに比べて約10%遅く、NVlinkを使用しないDDPに比べて約15%高速であることが示されています。 実際の違いは、各GPUが他のGPUと同期する必要があるデータの量に依存します。同期するデータが多いほど、遅いリンクが合計の実行時間を遅くする可能性が高くなります。 以下は完全なベンチマークコードと出力です: `NCCL_P2P_DISABLE=1`を使用して、対応するベンチマークでNVLink機能を無効にしました。 ```bash # DP rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ python examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 110.5948, 'train_samples_per_second': 1.808, 'epoch': 0.69} # DDP w/ NVlink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVlink rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} ``` ハードウェア: 2x TITAN RTX、各24GB + 2つのNVLink(`nvidia-smi topo -m`で `NV2`) ソフトウェア: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0` ## ZeRO Data Parallelism ZeROパワードデータ並列処理(ZeRO-DP)は、次の[ブログ投稿](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)のダイアグラムで説明されています。 ![DeepSpeed-Image-1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png) これは理解が難しいかもしれませんが、実際にはこの概念は非常にシンプルです。これは通常の`DataParallel`(DP)ですが、完全なモデルパラメータ、勾配、およびオプティマイザの状態を複製する代わりに、各GPUはそれぞれのスライスのみを保存します。そして、実行時に、特定のレイヤーに必要な完全なレイヤーパラメータが必要な場合、すべてのGPUが同期して、お互いに不足している部分を提供します。それがすべてです。 3つのレイヤーからなる単純なモデルを考えてみましょう。各レイヤーには3つのパラメータがあります: ``` La | Lb | Lc ---|----|--- a0 | b0 | c0 a1 | b1 | c1 a2 | b2 | c2 ``` レイヤーLaには、重みa0、a1、およびa2があります。 3つのGPUがある場合、Sharded DDP(= Zero-DP)はモデルを3つのGPUに次のように分割します: ``` GPU0: La | Lb | Lc ---|----|--- a0 | b0 | c0 GPU1: La | Lb | Lc ---|----|--- a1 | b1 | c1 GPU2: La | Lb | Lc ---|----|--- a2 | b2 | c2 ``` 
これは、典型的なディープニューラルネットワーク(DNN)のダイアグラムを想像すると、テンソル並列処理と同様の水平スライスであるようなものです。垂直スライスは、異なるGPUに完全な層グループを配置する方法です。しかし、これは単なる出発点に過ぎません。 これから、各GPUは通常のデータ並列処理(DP)と同様に、通常のミニバッチを受け取ります: ``` x0 => GPU0 x1 => GPU1 x2 => GPU2 ``` 最初に、入力データはレイヤーLaに適用されます。 GPU0に焦点を当てましょう:x0は、その前向きパスを実行するためにa0、a1、a2のパラメータが必要ですが、GPU0にはa0しかありません。GPU1からa1を、GPU2からa2を受け取り、モデルの各部分をまとめます。 同様に、GPU1はミニバッチx1を受け取り、a1しか持っていませんが、a0とa2のパラメータが必要です。これらはGPU0とGPU2から取得します。 GPU2もx2を受け取ります。a0とa1はGPU0とGPU1から受け取り、a2とともに完全なテンソルを再構築します。 3つのGPUは完全なテンソルを再構築し、前向き計算が行われます。 計算が完了すると、不要になったデータは削除されます。計算中だけ使用され、再構築は事前にフェッチを使用して効率的に行われます。 そして、このプロセス全体がレイヤーLb、次に前向きでLc、そして逆方向でLc -> Lb -> Laに対して繰り返されます。 私にとって、これは効率的なグループでの重みの分散戦略のように聞こえます: 1. 人Aはテントを持っています。 2. 人Bはストーブを持っています。 3. 人Cは斧を持っています。 今、彼らは毎晩持っているものを共有し、他の人から持っていないものをもらい、朝には割り当てられたタイプのギアを詰めて旅を続けます。これがSharded DDP / Zero DPです。 この戦略を、各人が独自のテント、ストーブ、斧を持って運ばなければならないシンプルな戦略と比較してみてください。これがPyTorchのDataParallel(DPおよびDDP)です。 このトピックの文献を読む際に、以下の類義語に出会うかもしれません:Sharded、Partitioned。 ZeROがモデルの重みを分割する方法に注意を払うと、これはテンソルパラレリズムと非常に似ているように見えます。これは後で議論される垂直モデルパラレリズムとは異なり、各レイヤーの重みをパーティション/シャーディングします。 Implementations: - [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) ZeRO-DP stages 1+2+3 - [`transformers` integration](main_classes/trainer#trainer-integrations) ## Naive Model Parallelism (Vertical) and Pipeline Parallelism ナイーブモデルパラレリズム(MP)は、モデルの層を複数のGPUに分散させる方法です。このメカニズムは比較的単純で、希望する層を`.to()`メソッドを使用して特定のデバイスに切り替えるだけです。これにより、データがこれらの層を通過するたびに、データも層と同じデバイスに切り替えられ、残りの部分は変更されません。 私たちはこれを「垂直MP」と呼びます。なぜなら、ほとんどのモデルがどのように描かれるかを思い出すと、層を垂直にスライスするからです。たとえば、以下の図は8層のモデルを示しています: ``` =================== =================== | 0 | 1 | 2 | 3 | | 4 | 5 | 6 | 7 | =================== =================== gpu0 gpu1 ``` 我々は、モデルを垂直に2つに分割し、レイヤー0から3をGPU0に配置し、レイヤー4から7をGPU1に配置しました。 データがレイヤー0から1、1から2、2から3に移動する間は通常のモデルと同じです。しかし、データがレイヤー3からレイヤー4に移動する必要がある場合、GPU0からGPU1への移動が発生し、通信のオーバーヘッドが発生します。参加しているGPUが同じコンピュートノード(例:同じ物理マシン)にある場合、このコピーは非常に高速ですが、異なるコンピュートノード(例:複数のマシン)にある場合、通信のオーバーヘッドは大幅に増加する可能性があります。 その後、レイヤー4から5、6から7までは通常のモデルと同様に動作し、7番目のレイヤーが完了すると、データをしばしばレイヤー0に戻す必要があります(またはラベルを最後のレイヤーに送信します)。これで損失を計算し、オプティマイザが作業を開始できます。 問題点: - 主な欠点、およびなぜこれを「単純な」MPと呼ぶのかは、1つを除いてすべてのGPUがどんな瞬間でもアイドル状態であることです。したがって、4つのGPUを使用する場合、単純なMPは、1つのGPUのメモリ容量を4倍にするのとほぼ同じであり、ハードウェアの残りを無視します。さらに、データのコピーのオーバーヘッドがあることを忘れてはいけません。したがって、4枚の6GBのカードは、データのコピーのオーバーヘッドがない1枚の24GBのカードと同じサイズを収容できるでしょうが、後者はトレーニングをより迅速に完了します。ただし、たとえば40GBのカードがあり、45GBのモデルを収める必要がある場合、勾配とオプティマイザの状態のためにほとんど収めることができません。 - 共有の埋め込みは、GPU間でコピーする必要があるかもしれません。 パイプライン並列処理(PP)は、ほぼ単純なMPと同じですが、GPUがアイドル状態になる問題を解決し、入力バッチをマイクロバッチに分割し、パイプラインを人工的に作成することにより、異なるGPUが計算プロセスに同時に参加できるようにします。 以下は、[GPipe論文](https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html)からの図で、上部には単純なMP、下部にはPPが示されています: ![mp-pp](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-gpipe-bubble.png) この図から、PPがGPUがアイドル状態の領域である「バブル」を少なく持つことがわかります。アイドル状態の部分は「バブル」と呼ばれます。 図の両方の部分は、4つのGPUがパイプラインに参加している4の次元の並列性を示しています。つまり、4つのパイプステージF0、F1、F2、F3のフォワードパスがあり、逆順のバックワードパスB3、B2、B1、B0があります。 PPは調整する新しいハイパーパラメータを導入します。それは `chunks` で、同じパイプステージを通じて連続して送信されるデータのチャンクの数を定義します。たとえば、下の図では `chunks=4` が表示されています。GPU0はチャンク0、1、2、3(F0,0、F0,1、F0,2、F0,3)で同じフォワードパスを実行し、他のGPUが作業を開始し始めるのを待ってから、GPU0はチャンク3、2、1、0(B0,3、B0,2、B0,1、B0,0)で逆順パスを実行します。 注意すべきは、概念的にはこれが勾配蓄積ステップ(GAS)と同じコンセプトであることです。PyTorchは `chunks` を使用し、DeepSpeedは同じハイパーパラメータをGASと呼びます。 `chunks` 
の導入により、PPはマイクロバッチ(MBS)の概念を導入します。DPはグローバルデータバッチサイズをミニバッチに分割します。したがって、DPの次数が4で、グローバルバッチサイズが1024の場合、4つのミニバッチ(それぞれ256)に分割されます(1024/4)。そして、`chunks`(またはGAS)の数が32である場合、マイクロバッチサイズは8になります(256/32)。各パイプラインステージは1つのマイクロバッチで作業します。 DP + PPセットアップのグローバルバッチサイズを計算するには、`mbs*chunks*dp_degree`(`8*32*4=1024`)を行います。 図に戻りましょう。 `chunks=1` であれば、非効率な単純なMPになります。非常に大きな `chunks` 値を使用すると、非常に小さなマイクロバッチサイズになり、効率があまり高くないかもしれません。したがって、GPUの効率的な利用を最大化する値を見つけるために実験する必要があります。これは、バブルのサイズを最小限にすることに対応する、すべての参加GPUにわたる高い並行GPU利用を可能にするためです。 2つのソリューショングループがあります。従来のパイプラインAPIソリューションと、ユーザーのモデルを大幅に変更する必要があるより現代的なソリューションです。 従来のパイプラインAPIソリューション: - PyTorch - DeepSpeed - Megatron-LM 現代的なソリューション: - Varuna - Sagemaker 従来のパイプラインAPIソリューションの問題点: - モデルをかなり変更する必要があるため、Pipelineはモジュールの通常のフローを`nn.Sequential`シーケンスに再書き込む必要があり、モデルの設計を変更することが必要です。 - 現在、Pipeline APIは非常に制限的です。最初のパイプラインステージに渡されるPython変数のセットがある場合、回避策を見つける必要があります。現在、パイプラインインターフェースでは、唯一のテンソルまたはテンソルのタプルを入力と出力として要求しています。これらのテンソルはバッチサイズを最初の次元として持っている必要があります。パイプラインはミニバッチをマイクロバッチに分割します。可能な改善点については、こちらの議論が行われています:https://github.com/pytorch/pytorch/pull/50693 - パイプステージのレベルでの条件付き制御フローは不可能です。例えば、T5のようなエンコーダーデコーダーモデルは、条件付きエンコーダーステージを処理するために特別な回避策が必要です。 - 各レイヤーを配置する必要があるため、1つのモデルの出力が他のモデルの入力になるようにします。 VarunaとSageMakerとの実験はまだ行っていませんが、彼らの論文によれば、上記で述べた問題のリストを克服し、ユーザーのモデルにははるかに小さな変更しか必要としないと報告されています。 実装: - [Pytorch](https://pytorch.org/docs/stable/pipeline.html) (initial support in pytorch-1.8, and progressively getting improved in 1.9 and more so in 1.10). Some [examples](https://github.com/pytorch/pytorch/blob/master/benchmarks/distributed/pipeline/pipe.py) - [DeepSpeed](https://www.deepspeed.ai/tutorials/pipeline/) - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) has an internal implementation - no API. - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://huggingface.co/papers/2111.05972) - this is a proprietary solution that can only be used on AWS. 
- [OSLO](https://github.com/tunib-ai/oslo) - この実装は、Hugging Face Transformersに基づいています。 🤗 Transformersのステータス: この執筆時点では、いずれのモデルも完全なPP(パイプライン並列処理)をサポートしていません。GPT2モデルとT5モデルは単純なMP(モデル並列処理)サポートを持っています。主な障害は、モデルを`nn.Sequential`に変換できず、すべての入力がテンソルである必要があることです。現在のモデルには、変換を非常に複雑にする多くの機能が含まれており、これらを削除する必要があります。 他のアプローチ: DeepSpeed、Varuna、およびSageMakerは、[交互にパイプラインを実行](https://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel-core-features.html)するコンセプトを使用しています。ここでは、バックワードパスを優先させてバブル(アイドル時間)をさらに最小限に抑えます。 Varunaは、最適なスケジュールを発見するためにシミュレーションを使用してスケジュールをさらに改善しようとします。 OSLOは、`nn.Sequential`の変換なしでTransformersに基づくパイプライン並列処理を実装しています。 ## Tensor Parallelism テンソル並列処理では、各GPUがテンソルのスライスのみを処理し、全体が必要な操作のためにのみ完全なテンソルを集約します。 このセクションでは、[Megatron-LM](https://github.com/NVIDIA/Megatron-LM)論文からのコンセプトと図を使用します:[GPUクラスタでの効率的な大規模言語モデルトレーニング](https://huggingface.co/papers/2104.04473)。 どのトランスフォーマの主要な構築要素は、完全に接続された`nn.Linear`に続く非線形アクティベーション`GeLU`です。 Megatronの論文の表記法に従って、行列の乗算部分を`Y = GeLU(XA)`と書くことができます。ここで、`X`と`Y`は入力ベクトルと出力ベクトルで、`A`は重み行列です。 行列の計算を行列形式で見ると、行列乗算を複数のGPUで分割できる方法が簡単に理解できます: ![Parallel GEMM](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_gemm.png) 重み行列`A`を`N`個のGPUに対して列ごとに分割し、並列で行列乗算`XA_1`から`XA_n`を実行すると、`N`個の出力ベクトル`Y_1、Y_2、...、Y_n`が得られ、それらを独立して`GeLU`に供給できます: ![独立したGeLU](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-independent-gelu.png) この原理を使用して、最後まで同期が必要ないまま、任意の深さのMLPを更新できます。Megatron-LMの著者はそのための有用なイラストを提供しています: ![並列シャード処理](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_shard_processing.png) マルチヘッドアテンションレイヤーを並列化することはさらに簡単です。それらは既に複数の独立したヘッドを持っているため、本質的に並列です! ![並列セルフアテンション](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_self_attention.png) 特別な考慮事項:TPには非常に高速なネットワークが必要であり、したがって1つのノードを超えてTPを実行しないことがお勧めされません。実際には、1つのノードに4つのGPUがある場合、最大のTP度数は4です。TP度数8が必要な場合は、少なくとも8つのGPUを持つノードを使用する必要があります。 このセクションは、元のより詳細な[TPの概要](https://github.com/huggingface/transformers/issues/10321#issuecomment-783543530)に基づいています。 by [@anton-l](https://github.com/anton-l)。 SageMakerは、より効率的な処理のためにTPとDPを組み合わせて使用します。 代替名: - [DeepSpeed](https://github.com/deepspeedai/DeepSpeed)はこれを「テンソルスライシング」と呼びます。詳細は[DeepSpeedの特徴](https://www.deepspeed.ai/training/#model-parallelism)をご覧ください。 実装例: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)には、モデル固有の内部実装があります。 - [parallelformers](https://github.com/tunib-ai/parallelformers)(現時点では推論のみ)。 - [SageMaker](https://huggingface.co/papers/2111.05972) - これはAWSでのみ使用できるプロプライエタリなソリューションです。 - [OSLO](https://github.com/tunib-ai/oslo)には、Transformersに基づいたテンソル並列実装があります。 🤗 Transformersの状況: - コア: まだコアには実装されていません。 - ただし、推論が必要な場合、[parallelformers](https://github.com/tunib-ai/parallelformers)はほとんどのモデルに対してサポートを提供します。これがコアに実装されるまで、これを使用できます。そして、トレーニングモードもサポートされることを期待しています。 - Deepspeed-Inferenceでは、BERT、GPT-2、およびGPT-NeoモデルをCUDAカーネルベースの高速推論モードでサポートしています。詳細は[こちら](https://www.deepspeed.ai/tutorials/inference-tutorial/)をご覧ください。 ## DP+PP DeepSpeedの[パイプラインチュートリアル](https://www.deepspeed.ai/tutorials/pipeline/)からの次の図は、DPをPPと組み合わせる方法を示しています。 ![dp-pp-2d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero-dp-pp.png) ここで重要なのは、DPランク0がGPU2を見えなくし、DPランク1がGPU3を見えなくすることです。DPにとって、存在するのはGPU 0 と 1 のみで、それらの2つのGPUのようにデータを供給します。GPU0はPPを使用してGPU2に一部の負荷を「秘密裏に」オフロードし、GPU1も同様にGPU3を支援に引き入れます。 各次元には少なくとも2つのGPUが必要ですので、ここでは少なくとも4つのGPUが必要です。 実装例: - [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) - 
[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://huggingface.co/papers/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformersの状況: まだ実装されていません ## DP+PP+TP さらに効率的なトレーニングを行うために、3Dパラレリズムを使用し、PPをTPとDPと組み合わせます。これは次の図で示されています。 ![dp-pp-tp-3d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-deepspeed-3d.png) この図は[3Dパラレリズム:兆パラメータモデルへのスケーリング](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/)というブログ投稿から取得されたもので、おすすめの読み物です。 各次元には少なくとも2つのGPUが必要ですので、ここでは少なくとも8つのGPUが必要です。 実装例: - [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) - DeepSpeedには、さらに効率的なDPであるZeRO-DPと呼ばれるものも含まれています。 - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://huggingface.co/papers/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformersの状況: まだ実装されていません。PPとTPがないため。 ## ZeRO DP+PP+TP DeepSpeedの主要な機能の1つはZeROで、これはDPの拡張機能です。これについてはすでに「ZeROデータ並列化」で説明されています。通常、これは単独で動作する機能で、PPやTPは必要ありません。しかし、PPとTPと組み合わせることもできます。 ZeRO-DPがPPと組み合わされる場合、通常はZeROステージ1(オプティマイザーシャーディング)のみが有効になります。 ZeROステージ2(勾配シャーディング)をパイプライン並列化と組み合わせて使用する理論的な可能性はありますが、性能に悪影響を及ぼします。各マイクロバッチごとに勾配をシャーディングする前に、勾配を集約するための追加のリダクションスキャッター集計が必要で、通信オーバーヘッドが発生する可能性があります。パイプライン並列化の性質上、小さなマイクロバッチが使用され、計算の集中度(マイクロバッチサイズ)をバランスにかけ、パイプラインバブル(マイクロバッチ数)を最小限に抑えることに焦点が当てられています。したがって、これらの通信コストは影響を及ぼすでしょう。 さらに、PPには通常よりも少ない層が含まれており、メモリの節約はそれほど大きくありません。PPは既に勾配サイズを「1/PP」に削減するため、勾配シャーディングの節約は純粋なDPよりもはるかに重要ではありません。 ZeROステージ3も同様の理由で適していません - より多くのノード間通信が必要です。 そして、ZeROを持っているので、もう一つの利点はZeRO-Offloadです。これはステージ1オプティマイザーステートをCPUにオフロードできます。 実装例: - [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed)と[BigScienceからのMegatron-Deepspeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed)は、前者のリポジトリのフォークです。 - [OSLO](https://github.com/tunib-ai/oslo) 重要な論文: - [DeepSpeedとMegatronを使用したMegatron-Turing NLG 530Bのトレーニング](https://huggingface.co/papers/2201.11990) 🤗 Transformersの状況: まだ実装されていません。PPとTPがないため。 ## FlexFlow [FlexFlow](https://github.com/flexflow/FlexFlow)は、わずかに異なるアプローチで並列化の問題を解決します。 論文: [Zhihao Jia、Matei Zaharia、Alex Aikenによる "Deep Neural Networksのデータとモデルの並列化を超えて"](https://huggingface.co/papers/1807.05358) FlexFlowは、サンプル-オペレータ-属性-パラメータの4D並列化を行います。 1. サンプル = データ並列化(サンプル単位の並列化) 2. オペレータ = 単一の操作をいくつかのサブ操作に並列化 3. 属性 = データ並列化(長さ方向の並列化) 4. 
パラメータ = モデル並列化(次元に関係なく、水平または垂直) 例: * サンプル シーケンス長512の10バッチを考えてみましょう。これらをサンプル次元で2つのデバイスに並列化すると、10 x 512が5 x 2 x 512になります。 * オペレータ 層正規化を行う場合、まずstdを計算し、次にmeanを計算し、データを正規化できます。オペレータの並列化により、stdとmeanを並列に計算できます。したがって、オペレータ次元で2つのデバイス(cuda:0、cuda:1)に並列化すると、最初に入力データを両方のデバイスにコピーし、cuda:0でstdを計算し、cuda:1でmeanを同時に計算します。 * 属性 10バッチの512長があります。これらを属性次元で2つのデバイスに並列化すると、10 x 512が10 x 2 x 256になります。 * パラメータ これはテンソルモデルの並列化または単純な層ごとのモデルの並列化と似ています。 このフレームワークの重要性は、(1)GPU/TPU/CPU対(2)RAM/DRAM対(3)高速内部接続/低速外部接続などのリソースを取り、これらすべてをアルゴリズムによって自動的に最適化することです。どの並列化をどこで使用するかをアルゴリズム的に決定します。 非常に重要な側面の1つは、FlexFlowは静的で固定のワークロードを持つモデルのために設計されており、動的な動作を持つモデルはイテレーションごとに異なる並列化戦略を好む場合があることです。 したがって、このフレームワークの約束は非常に魅力的です。選択したクラスタで30分間のシミュレーションを実行し、この特定の環境を最適に利用するための最良の戦略を提供します。部分を追加/削除/置換すると、それに対して実行して再最適化プランを作成します。その後、トレーニングできます。異なるセットアップには独自の最適化があります。 🤗 Transformersの現在の状況: まだ統合されていません。すでに[transformers.utils.fx](https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py)を使用してモデルがFXトレース可能であるため、FlexFlowを動作させるために必要な手順を誰かが見つける必要があります。 ## Which Strategy To Use When ここでは、どの並列化戦略をいつ使用するかの非常におおまかなアウトラインを示します。各リストの最初が通常よりも速いことが一般的です。 **⇨ 単一GPU** * モデルが単一GPUに収まる場合: 1. 通常の使用 * モデルが単一GPUに収まらない場合: 1. ZeRO + CPUをオフロードし、オプションでNVMeをオフロード 2. 上記に加えて、最大のレイヤーが単一GPUに収まらない場合、[Memory Centric Tiling](https://deepspeed.readthedocs.io/en/latest/zero3.html#memory-centric-tiling)(詳細は以下参照)を有効化 * 最大のレイヤーが単一GPUに収まらない場合: 1. ZeROを使用しない場合 - TPを有効化する必要があります。なぜなら、PPだけでは収めることができないからです。 2. ZeROを使用する場合は、上記の「単一GPU」のエントリと同じものを参照してください **⇨ 単一ノード/マルチGPU** * モデルが単一GPUに収まる場合: 1. DDP - 分散データ並列 2. ZeRO - 状況と使用される構成に依存して速いかどうかが異なることがあります * モデルが単一GPUに収まらない場合: 1. PP 2. ZeRO 3. TP 非常に高速なノード内接続がNVLINKまたはNVSwitchである場合、これらのすべてはほとんど同等の性能です。これらがない場合、PPはTPまたはZeROよりも速くなります。TPの度合いも違いを生じるかもしれません。特定のセットアップで勝者を見つけるために実験するのが最善です。 TPはほとんど常に単一ノード内で使用されます。つまり、TPサイズ <= ノードあたりのGPUです。 * 最大のレイヤーが単一GPUに収まらない場合: 1. ZeROを使用しない場合 - TPを使用する必要があります。なぜなら、PPだけでは収めることができないからです。 2. ZeROを使用する場合は、上記の「単一GPU」のエントリと同じものを参照してください **⇨ マルチノード/マルチGPU** * 高速なノード間接続がある場合: 1. ZeRO - モデルへのほとんどの変更が不要です 2. PP+TP+DP - 通信が少なく、モデルに大規模な変更が必要です * 遅いノード間接続があり、GPUメモリが少ない場合: 1. DP+PP+TP+ZeRO-1
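参考までに、🤗 Trainer から DeepSpeed ZeRO ステージ 3(CPU オフロード付き)を使う場合の最小構成のスケッチを示します(値は説明用の例であり、実際にはハードウェアに合わせて調整が必要です)。

```python
from transformers import TrainingArguments

# ZeRO ステージ 3 + オプティマイザ/パラメータの CPU オフロードの最小構成例
ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {"device": "cpu"},
        "offload_param": {"device": "cpu"},
    },
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
}

training_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=4,
    deepspeed=ds_config,  # dict か JSON ファイルへのパスを渡せます
)
```

起動には通常 `deepspeed` ランチャー(または `torchrun`)を使用します。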
transformers/docs/source/ja/perf_train_gpu_many.md/0
{ "file_path": "transformers/docs/source/ja/perf_train_gpu_many.md", "repo_id": "transformers", "token_count": 18235 }
370
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Automatic speech recognition [[open-in-colab]] <Youtube id="TksaY_FDgnk"/> 自動音声認識 (ASR) は音声信号をテキストに変換し、一連の音声入力をテキスト出力にマッピングします。 Siri や Alexa などの仮想アシスタントは ASR モデルを使用してユーザーを日常的に支援しており、ライブキャプションや会議中のメモ取りなど、他にも便利なユーザー向けアプリケーションが数多くあります。 このガイドでは、次の方法を説明します。 1. [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) データセットの [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) を微調整して、音声をテキストに書き起こします。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/automatic-speech-recognition) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate jiwer ``` モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load MInDS-14 dataset まず、🤗 データセット ライブラリから [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) データセットの小さいサブセットをロードします。これにより、完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が得られます。 ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]") ``` [`~Dataset.train_test_split`] メソッドを使用して、データセットの `train` 分割をトレイン セットとテスト セットに分割します。 ```py >>> minds = minds.train_test_split(test_size=0.2) ``` 次に、データセットを見てみましょう。 ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 16 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 4 }) }) ``` データセットには`lang_id`や`english_transcription`などの多くの有用な情報が含まれていますが、このガイドでは「`audio`」と「`transciption`」に焦点を当てます。 [`~datasets.Dataset.remove_columns`] メソッドを使用して他の列を削除します。 ```py >>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"]) ``` もう一度例を見てみましょう。 ```py >>> minds["train"][0] {'audio': {'array': array([-0.00024414, 0. , 0. 
, ..., 0.00024414, 0.00024414, 0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 8000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 次の 2 つのフィールドがあります。 - `audio`: 音声ファイルをロードしてリサンプリングするために呼び出す必要がある音声信号の 1 次元の `array`。 - `transcription`: ターゲットテキスト。 ## Preprocess 次のステップでは、Wav2Vec2 プロセッサをロードしてオーディオ信号を処理します。 ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base") ``` MInDS-14 データセットのサンプリング レートは 8000kHz です (この情報は [データセット カード](https://huggingface.co/datasets/PolyAI/minds14) で確認できます)。つまり、データセットを再サンプリングする必要があります。事前トレーニングされた Wav2Vec2 モデルを使用するには、16000kHz に設定します。 ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ..., 2.78103951e-04, 2.38446111e-04, 1.18740834e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 16000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 上の `transcription` でわかるように、テキストには大文字と小文字が混在しています。 Wav2Vec2 トークナイザーは大文字のみでトレーニングされるため、テキストがトークナイザーの語彙と一致することを確認する必要があります。 ```py >>> def uppercase(example): ... return {"transcription": example["transcription"].upper()} >>> minds = minds.map(uppercase) ``` 次に、次の前処理関数を作成します。 1. `audio`列を呼び出して、オーディオ ファイルをロードしてリサンプリングします。 2. オーディオ ファイルから `input_values` を抽出し、プロセッサを使用して `transcription` 列をトークン化します。 ```py >>> def prepare_dataset(batch): ... audio = batch["audio"] ... batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"]) ... batch["input_length"] = len(batch["input_values"][0]) ... return batch ``` データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.Dataset.map`] 関数を使用します。 `num_proc` パラメータを使用してプロセスの数を増やすことで、`map` を高速化できます。 [`~datasets.Dataset.remove_columns`] メソッドを使用して、不要な列を削除します。 ```py >>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4) ``` 🤗 Transformers には ASR 用のデータ照合器がないため、[`DataCollat​​orWithPadding`] を調整してサンプルのバッチを作成する必要があります。また、テキストとラベルが (データセット全体ではなく) バッチ内の最も長い要素の長さに合わせて動的に埋め込まれ、均一な長さになります。 `padding=True` を設定すると、`tokenizer` 関数でテキストを埋め込むことができますが、動的な埋め込みの方が効率的です。 他のデータ照合器とは異なり、この特定のデータ照合器は、`input_values`と `labels`」に異なるパディング方法を適用する必要があります。 ```py >>> import torch >>> from dataclasses import dataclass, field >>> from typing import Any, Dict, List, Optional, Union >>> @dataclass ... class DataCollatorCTCWithPadding: ... processor: AutoProcessor ... padding: Union[bool, str] = "longest" ... def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]: ... # split inputs and labels since they have to be of different lengths and need ... # different padding methods ... 
input_features = [{"input_values": feature["input_values"][0]} for feature in features] ... label_features = [{"input_ids": feature["labels"]} for feature in features] ... batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt") ... labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt") ... # replace padding with -100 to ignore loss correctly ... labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) ... batch["labels"] = labels ... return batch ``` 次に、`DataCollat​​orForCTCWithPadding` をインスタンス化します。 ```py >>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest") ``` ## Evaluate トレーニング中にメトリクスを含めると、多くの場合、モデルのパフォーマンスを評価するのに役立ちます。 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) ライブラリを使用して、評価メソッドをすばやくロードできます。このタスクでは、[単語エラー率](https://huggingface.co/spaces/evaluate-metric/wer) (WER) メトリクスを読み込みます (🤗 Evaluate [クイック ツアー](https://huggingface.co/docs/evaluate/a_quick_tour) を参照して、メトリクスをロードして計算する方法の詳細を確認してください)。 ```py >>> import evaluate >>> wer = evaluate.load("wer") ``` 次に、予測とラベルを [`~evaluate.EvaluationModule.compute`] に渡して WER を計算する関数を作成します。 ```py >>> import numpy as np >>> def compute_metrics(pred): ... pred_logits = pred.predictions ... pred_ids = np.argmax(pred_logits, axis=-1) ... pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id ... pred_str = processor.batch_decode(pred_ids) ... label_str = processor.batch_decode(pred.label_ids, group_tokens=False) ... wer = wer.compute(predictions=pred_str, references=label_str) ... return {"wer": wer} ``` これで`compute_metrics`関数の準備が整いました。トレーニングをセットアップするときにこの関数に戻ります。 ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[ここ](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForCTC`] で Wav2Vec2 をロードします。 `ctc_loss_reduction` パラメータで適用する削減を指定します。多くの場合、デフォルトの合計ではなく平均を使用する方が適切です。 ```py >>> from transformers import AutoModelForCTC, TrainingArguments, Trainer >>> model = AutoModelForCTC.from_pretrained( ... "facebook/wav2vec2-base", ... ctc_loss_reduction="mean", ... pad_token_id=processor.tokenizer.pad_token_id, ... ) ``` この時点で残っている手順は次の 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。唯一の必須パラメータは、モデルの保存場所を指定する `output_dir` です。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。各エポックの終了時に、[`トレーナー`] は WER を評価し、トレーニング チェックポイントを保存します。 2. トレーニング引数を、モデル、データセット、トークナイザー、データ照合器、および `compute_metrics` 関数とともに [`Trainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_asr_mind_model", ... per_device_train_batch_size=8, ... gradient_accumulation_steps=2, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=2000, ... gradient_checkpointing=True, ... fp16=True, ... group_by_length=True, ... eval_strategy="steps", ... per_device_eval_batch_size=8, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... load_best_model_at_end=True, ... metric_for_best_model="wer", ... greater_is_better=False, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... processing_class=processor, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... 
) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> 自動音声認識用にモデルを微調整する方法のより詳細な例については、英語 ASR および英語のこのブログ [投稿](https://huggingface.co/blog/fine-tune-wav2vec2-english) を参照してください。多言語 ASR については、この [投稿](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) を参照してください。 </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 推論を実行したい音声ファイルをロードします。必要に応じて、オーディオ ファイルのサンプリング レートをモデルのサンプリング レートと一致するようにリサンプリングすることを忘れないでください。 ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用して自動音声認識用の`pipeline`をインスタンス化し、オーディオ ファイルをそれに渡します。 ```py >>> from transformers import pipeline >>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model") >>> transcriber(audio_file) {'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'} ``` <Tip> 転写はまあまあですが、もっと良くなる可能性があります。さらに良い結果を得るには、より多くの例でモデルを微調整してみてください。 </Tip> 必要に応じて、「パイプライン」の結果を手動で複製することもできます。 <frameworkcontent> <pt> プロセッサをロードしてオーディオ ファイルと文字起こしを前処理し、`input`を PyTorch テンソルとして返します。 ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` Pass your inputs to the model and return the logits: ```py >>> from transformers import AutoModelForCTC >>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 最も高い確率で予測された `input_ids` を取得し、プロセッサを使用して予測された `input_ids` をデコードしてテキストに戻します。 ```py >>> import torch >>> predicted_ids = torch.argmax(logits, dim=-1) >>> transcription = processor.batch_decode(predicted_ids) >>> transcription ['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'] ``` </pt> </frameworkcontent>
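When the audio comes from a local file rather than a 🤗 Dataset, the same 16 kHz requirement applies. A minimal sketch, assuming `torchaudio` is installed and `my_recording.wav` is a placeholder file name (`transcriber` is the pipeline created above):

```py
# Minimal sketch: transcribe a local file after resampling it to 16 kHz.
import torchaudio

waveform, sr = torchaudio.load("my_recording.wav")  # placeholder path
audio = waveform.mean(dim=0)                         # downmix to mono if stereo
if sr != 16_000:
    audio = torchaudio.functional.resample(audio, orig_freq=sr, new_freq=16_000)

result = transcriber({"raw": audio.numpy(), "sampling_rate": 16_000})
print(result["text"])
```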
transformers/docs/source/ja/tasks/asr.md/0
{ "file_path": "transformers/docs/source/ja/tasks/asr.md", "repo_id": "transformers", "token_count": 7049 }
371
# docstyle-ignore INSTALL_CONTENT = """ # Transformers 설치 방법 ! pip install transformers datasets evaluate accelerate # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
transformers/docs/source/ko/_config.py/0
{ "file_path": "transformers/docs/source/ko/_config.py", "repo_id": "transformers", "token_count": 259 }
372
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 완전 분할 데이터 병렬 처리(FSDP) [[fully-sharded-data-parallel]] [Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)은 모델의 매개변수, 그레이디언트 및 옵티마이저 상태를 사용 가능한 GPU(작업자 또는 *랭크*라고도 함) 수에 따라 분할하는 데이터 병렬 처리 방식입니다. [DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html)와 달리, FSDP는 각 GPU에 모델을 복제하기 때문에 메모리 사용량을 줄입니다. 이는 GPU 메모리 효율성을 향상시키며 적은 수의 GPU로 훨씬 더 큰 모델을 훈련할 수 있게 합니다. FSDP는 분산 환경에서의 훈련을 쉽게 관리할 수 있는 라이브러리인 Accelerate와 통합되어 있으며, 따라서 [`Trainer`] 클래스에서 사용할 수 있습니다. 시작하기 전에 Accelerate가 설치되어 있고 최소 PyTorch 2.1.0 이상의 버전이 설치되어 있는지 확인하세요. ```bash pip install accelerate ``` ## FSDP 구성 [[fsdp-configuration]] 시작하려면 [`accelerate config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) 명령을 실행하여 훈련 환경에 대한 구성 파일을 생성하세요. Accelerate는 이 구성 파일을 사용하여 `accelerate config`에서 선택한 훈련 옵션에 따라 자동으로 올바른 훈련 환경을 설정합니다. ```bash accelerate config ``` `accelerate config`를 실행하면 훈련 환경을 구성하기 위한 일련의 옵션들이 나타납니다. 이 섹션에서는 가장 중요한 FSDP 옵션 중 일부를 다룹니다. 다른 사용 가능한 FSDP 옵션에 대해 더 알아보고 싶다면 [fsdp_config](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.fsdp_config) 매개변수를 참조하세요. ### 분할 전략 [[sharding-strategy]] FSDP는 여러 가지 분할 전략을 제공합니다: * `FULL_SHARD` - 모델 매개변수, 그레이디언트 및 옵티마이저 상태를 작업자 간에 분할; 이 옵션을 선택하려면 `1`을 선택하세요 * `SHARD_GRAD_OP` - 그레이디언트 및 옵티마이저 상태를 작업자 간에 분할; 이 옵션을 선택하려면 `2`를 선택하세요 * `NO_SHARD` - 아무 것도 분할하지 않음 (DDP와 동일); 이 옵션을 선택하려면 `3`을 선택하세요 * `HYBRID_SHARD` - 각 작업자가 전체 복사본을 가지고 있는 상태에서 모델 매개변수, 그레이디언트 및 옵티마이저 상태를 작업자 내에서 분할; 이 옵션을 선택하려면 `4`를 선택하세요 * `HYBRID_SHARD_ZERO2` - 각 작업자가 전체 복사본을 가지고 있는 상태에서 그레이디언트 및 옵티마이저 상태를 작업자 내에서 분할; 이 옵션을 선택하려면 `5`를 선택하세요 이것은 `fsdp_sharding_strategy` 플래그로 활성화됩니다. ### CPU 오프로드 [[cpu-offload]] 사용하지 않는 매개변수와 그레이디언트를 CPU로 오프로드하여 더 많은 GPU 메모리를 절약하고 FSDP로도 충분하지 않은 큰 모델을 GPU에 적재할 수 있도록 할 수 있습니다. 이는 `accelerate config`를 실행할 때 `fsdp_offload_params: true`로 설정하여 활성화됩니다. ### 래핑 정책 [[wrapping-policy]] FSDP는 네트워크의 각 레이어를 래핑하여 적용됩니다. 래핑은 일반적으로 중첩 방식으로 적용되며 각각 순방향으로 지나간 후 전체 가중치를 삭제하여 다음 레이어에서 사용할 메모리를 절약합니다. *자동 래핑* 정책은 이를 구현하는 가장 간단한 방법이며 코드를 변경할 필요가 없습니다. Transformer 레이어를 래핑하려면 `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`를 선택하고 래핑할 레이어를 지정하려면 `fsdp_transformer_layer_cls_to_wrap`를 선택하세요 (예: `BertLayer`). 또는 특정 매개변수 수를 초과할 경우 FSDP가 레이어에 적용되는 크기 기반 래핑 정책을 선택할 수 있습니다. 이는 `fsdp_wrap_policy: SIZE_BASED_WRAP` 및 `min_num_param`을 원하는 크기의 임계값으로 설정하여 활성화됩니다. ### 체크포인트 [[checkpointing]] 중간 체크포인트는 `fsdp_state_dict_type: SHARDED_STATE_DICT`로 저장해야 합니다. CPU 오프로드가 활성화된 랭크 0에서 전체 상태 딕셔너리를 저장하는 데 시간이 많이 걸리고, 브로드캐스팅 중 무기한 대기하여 `NCCL Timeout` 오류가 발생할 수 있기 때문입니다. [`~accelerate.Accelerator.load_state`] 메서드를 사용하여 분할된 상태 딕셔너리로 훈련을 재개할 수 있습니다. 
```py # 경로가 내재된 체크포인트 accelerator.load_state("ckpt") ``` 그러나 훈련이 끝나면 전체 상태 딕셔너리를 저장해야 합니다. 분할된 상태 딕셔너리는 FSDP와만 호환되기 때문입니다. ```py if trainer.is_fsdp_enabled: trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT") trainer.save_model(script_args.output_dir) ``` ### TPU [[tpu]] [PyTorch XLA](https://pytorch.org/xla/release/2.1/index.html)는 TPU에 대한 FSDP 훈련을 지원하며 `accelerate config`로 생성된 FSDP 구성 파일을 수정하여 활성화할 수 있습니다. 위에서 지정한 분할 전략 및 래핑 옵션 외에도 아래에 표시된 매개변수를 파일에 추가할 수 있습니다. ```yaml xla: True # PyTorch/XLA를 활성화하려면 True로 설정해야 합니다 xla_fsdp_settings: # XLA 특정 FSDP 매개변수 xla_fsdp_grad_ckpt: True # gradient checkpointing을 사용합니다 ``` [`xla_fsdp_settings`](https://github.com/pytorch/xla/blob/2e6e183e0724818f137c8135b34ef273dea33318/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py#L128)는 FSDP에 대한 추가적인 XLA 특정 매개변수를 구성할 수 있게 합니다. ## 훈련 시작 [[launch-training]] 예시 FSDP 구성 파일은 다음과 같을 수 있습니다: ```yaml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: true fsdp_sharding_strategy: 1 fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` 훈련을 시작하려면 [`accelerate launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) 명령을 실행하세요. 이 때 전에 `accelerate config`로 생성한 구성 파일을 자동으로 사용합니다. ```bash accelerate launch my-trainer-script.py ``` ```bash accelerate launch --fsdp="full shard" --fsdp_config="path/to/fsdp_config/ my-trainer-script.py ``` ## 다음 단계 [[next-steps]] FSDP는 매우 큰 모델을 훈련할 때 강력한 도구가 될 수 있으며, 여러 개의 GPU나 TPU를 사용할 수 있습니다. 모델 매개변수, 옵티마이저 및 그레이디언트 상태를 분할하고 비활성 상태일 때, CPU로 오프로드하면 FSDP는 대규모 훈련의 높은 연산 비용을 줄일 수 있습니다. 더 알아보고 싶다면 다음 자료가 도움이 될 수 있습니다: * [FSDP](https://huggingface.co/docs/accelerate/usage_guides/fsdp)에 대한 더 깊이 있는 Accelerate 가이드를 따라가 보세요. * [PyTorch의 완전 분할 데이터 병렬 처리 (FSDP) API를 소개합니다](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) 블로그 글을 읽어보세요. * [FSDP를 사용하여 클라우드 TPU에서 PyTorch 모델 크기 조절하기](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) 블로그 글을 읽어보세요.
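The same sharding options can also be passed straight to [`TrainingArguments`] instead of going through `accelerate config`. A minimal sketch follows; the values are illustrative, and the layer class to wrap has to match your own model:

```py
# Minimal sketch: enabling FSDP directly from TrainingArguments.
# "BertLayer" is only an example; use the transformer block class of your model.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=8,
    bf16=True,
    fsdp="full_shard auto_wrap",  # add "offload" to also offload params to CPU
    fsdp_config={"transformer_layer_cls_to_wrap": ["BertLayer"]},
)
# launch with: accelerate launch train.py  (or torchrun --nproc_per_node=2 train.py)
```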
transformers/docs/source/ko/fsdp.md/0
{ "file_path": "transformers/docs/source/ko/fsdp.md", "repo_id": "transformers", "token_count": 5834 }
373
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 파이프라인 [[pipelines]] 파이프라인은 모델을 추론에 활용할 수 있는 훌륭하고 쉬운 방법입니다. 이 파이프라인은 라이브러리의 복잡한 코드를 대부분 추상화하여, 개체명 인식(Named Entity Recognition), 마스크드 언어 모델링(Masked Language Modeling), 감정 분석(Sentiment Analysis), 특성 추출(Feature Extraction), 질의응답(Question Answering) 등의 여러 작업에 특화된 간단한 API를 제공합니다. 사용 예시는 [작업 요약](../task_summary)을 참고하세요. 파이프라인 추상화는 다음 두 가지 범주로 나뉩니다. - \[`파이프라인`]은 다른 모든 파이프라인을 캡슐화하는 가장 강력한 객체입니다. - 작업별 파이프라인은 [오디오](#audio), [컴퓨터 비전](#computer-vision), [자연어 처리](#natural-language-processing), [멀티모달](#multimodal) 작업에 사용할 수 있습니다. ## 파이프라인 추상화 [[the-pipeline-abstraction]] *파이프라인* 추상화는 사용 가능한 모든 파이프라인을 감싸는 래퍼입니다. 다른 파이프라인처럼 인스턴스화되며, 추가적인 편의 기능을 제공합니다. 단일 항목 호출 예시: ```python >>> pipe = pipeline("text-classification") >>> pipe("This restaurant is awesome") [{'label': 'POSITIVE', 'score': 0.9998743534088135}] ``` [hub](https://huggingface.co)에서 특정 모델을 사용하려는 경우, 해당 모델이 이미 허브에 작업을 정의하고 있다면 작업명을 생략할 수 있습니다. ```python >>> pipe = pipeline(model="FacebookAI/roberta-large-mnli") >>> pipe("This restaurant is awesome") [{'label': 'NEUTRAL', 'score': 0.7313136458396912}] ``` 여러 항목을 처리하려면 *리스트*를 전달하세요. ```python >>> pipe = pipeline("text-classification") >>> pipe(["This restaurant is awesome", "This restaurant is awful"] ) [{'label': 'POSITIVE', 'score': 0.9998743534088135}, {'label': 'NEGATIVE', 'score': 0.9996669292449951}] ``` 전체 데이터셋을 순회하려면 `dataset`을 직접 사용하는 것이 좋습니다. 이렇게 하면 전체 데이터를 한 번에 메모리에 올릴 필요도 없고, 배치 처리를 따로 구현하지 않아도 됩니다. 이 방식은 GPU에서 사용자 정의 루프와 유사한 속도로 작동하며, 만약 그렇지 않을 경우 이슈를 등록해 주세요. ```python import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (*pt* 전용)는 데이터셋 항목의 딕셔너리에서 지정된 키만 반환합니다. # 이 예제에서는 *target* 항목이 필요하지 않으므로 KeyDataset을 사용합니다. 문장 쌍 입력에는 KeyPairDataset을 사용하세요. for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` 더 편리하게 사용하려면 제너레이터도 가능합니다. ```python from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # 데이터는 데이터셋, 데이터베이스, 큐 또는 HTTP 요청에서 올 수 있습니다. # 서버에서 # 주의: 반복적이므로 `num_workers > 1` 변수를 사용할 수 없습니다. # 데이터를 전처리하기 위해 여러 스레드를 사용할 수 없습니다. 여전히 # 메인 스레드가 대규모 추론을 수행하는 동안 하나의 스레드가 전처리를 수행할 수 있습니다. yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` \[\[autodoc]] pipeline ## 파이프라인 배치 처리 [[pipeline-batching]] 모든 파이프라인은 배치 처리를 지원합니다. 리스트, `Dataset`, `Generator` 전달 시 스트리밍 기능을 사용할 때 작동합니다. 
```python from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # 이전과 동일한 출력이지만, 내용을 배치로 모델에 전달합니다. ``` <Tip warning={true}> 하지만 배치 처리가 항상 성능 향상을 보장하는 것은 아닙니다. 하드웨어, 데이터, 모델에 따라 속도가 10배로 빨라질수도, 5배 느려질 수 있습니다. 주로 속도 향상이 있는 예시: </Tip> ```python from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass ``` ``` # On GTX 970 ------------------------------ Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU) ``` 주로 속도 저하가 있는 예시: ```python class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n ``` 이는 다른 문장들에 비해 간헐적으로 매우 긴 문장이 포함된 경우입니다. 이 경우 **전체** 배치가 400토큰 길이로 ([64, 400]) 되어야 하므로, [64, 4] 대신 [64, 400]이 되어 크게 속도가 저하됩니다. 게다가, 더 큰 배치에서는 프로그램이 충돌할 수 있습니다. ``` ------------------------------ Streaming no batching 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch) ``` 일반적인 해결책은 없으며, 사용 사례에 따라 다를 수 있습니다. 사용자를 위한 경험상 지침: - **하드웨어와 실제 워크로드로 성능을 측정하세요. 측정이 답입니다.** - 실시간 추론(latency)이 중요하다면 배치 처리하지 마세요. - CPU 사용 시에도 배치 처리하지 않는 것이 좋습니다. - GPU에서 정적 데이터 처리(throughput)가 목적이라면 - 입력 시퀀스 길이("실제" 데이터)를 잘 모르는 경우, 기본적으로 배치 처리하지 말고 성능을 측정하면서 임시로 배치를 적용해 보고, 실패 시 이를 복구할 수 있도록 OOM 검사 로직을 추가하세요. (시퀀스 길이를 제어하지 않으면 언젠가는 실패하게 됩니다.) - 시퀀스 길이가 일정하다면 배치 처리가 유리할 수 있습니다. 측정하며 OOM까지 시도해 보세요. - GPU 메모리가 클수록 배치 처리의 이점이 큽니다. 
- 배치 처리 활성화 시 OOM을 핸들링할 수 있도록 대비하세요. ## 파이프라인 청크 배치 처리 [[pipeline-chunk-batching]] `제로샷 분류` 및 `질의응답` 파이프라인은 단일 입력이 여러 포워드 패스를 유발할 수 있어 `배치 크기` 인자를 그대로 사용하면 문제가 발생할 수 있습니다. 이를 해결하기 위해 두 파이프라인은 `청크 파이프라인` 형태로 동작합니다. 요약하면 ```python preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs) ``` 이제 내부적으로는 ```python all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs) ``` 파이프라인의 사용 방식이 동일하므로, 코드에는 거의 영향을 주지 않습니다. 파이프라인은 배치 처리를 자동으로 수행하기 때문에 입력이 몇 번의 포워드 패스를 발생시키는지 고려할 필요 없이, `배치 크기`는 입력과 무관하게 최적화할 수 있습니다. 다만 앞서 언급한 주의사항은 여전히 유효합니다. ## 파이프라인 FP16 추론 [[pipeline-fp16-inference]] 모델은 FP16 모드로 실행할 수 있으며, GPU에서 메모리를 절약하면서 처리 속도를 크게 향상시킬 수 있습니다. 대부분의 모델은 성능 저하 없이 FP16을 지원하며, 모델이 클수록 성능 저하 가능성은 더 낮아집니다. FP16 추론을 활성화하려면 파이프라인 생성자에 `dtype=torch.float16` 또는 `dtype='float16'`을 전달하세요. 이 기능은 파이토치 백엔드를 사용하는 모델에서만 작동하며, 입력은 내부적으로 FP16 형식으로 변환됩니다. ## 파이프라인 사용자 정의 코드 [[pipeline-custom-code]] 특정 파이프라인을 오버라이드하려면, 먼저 해당 작업에 대한 이슈를 등록해 주세요. 파이프라인의 목표는 대부분의 사용 사례를 지원하는 것이므로, `transformers` 팀이 추가 지원을 고려할 수 있습니다. 간단히 시도하려면 파이프라인 클래스를 상속하세요. ```python class MyPipeline(TextClassificationPipeline): def postprocess(): # 사용자 정의 후처리 코드 작성 scores = scores * 100 # 추가 코드 작성 my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # 또는 *pipeline* 함수를 사용할 경우: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) ``` 이를 통해 원하는 모든 커스텀 코드를 적용할 수 있습니다. ## 파이프라인 구현하기 [[implementing-a-pipeline]] [새 파이프라인 구현](../add_new_pipeline) ## 오디오 [[audio]] 오디오 작업에 사용할 수 있는 파이프라인은 다음과 같습니다. ### AudioClassificationPipeline [[transformers.AudioClassificationPipeline]] [[autodoc]] AudioClassificationPipeline - __call__ - all ### AutomaticSpeechRecognitionPipeline [[transformers.AutomaticSpeechRecognitionPipeline]] [[autodoc]] AutomaticSpeechRecognitionPipeline - __call__ - all ### TextToAudioPipeline [[transformers.TextToAudioPipeline]] [[autodoc]] TextToAudioPipeline - __call__ - all ### ZeroShotAudioClassificationPipeline [[transformers.ZeroShotAudioClassificationPipeline]] [[autodoc]] ZeroShotAudioClassificationPipeline - __call__ - all ## 컴퓨터 비전 [[computer-vision]] 컴퓨터 비전 작업에 사용할 수 있는 파이프라인은 다음과 같습니다. ### DepthEstimationPipeline [[transformers.DepthEstimationPipeline]] [[autodoc]] DepthEstimationPipeline - __call__ - all ### ImageClassificationPipeline [[transformers.ImageClassificationPipeline]] [[autodoc]] ImageClassificationPipeline - __call__ - all ### ImageSegmentationPipeline [[transformers.ImageSegmentationPipeline]] [[autodoc]] ImageSegmentationPipeline - __call__ - all ### ImageToImagePipeline [[transformers.ImageToImagePipeline]] [[autodoc]] ImageToImagePipeline - __call__ - all ### ObjectDetectionPipeline [[transformers.ObjectDetectionPipeline]] [[autodoc]] ObjectDetectionPipeline - __call__ - all ### VideoClassificationPipeline [[transformers.VideoClassificationPipeline]] [[autodoc]] VideoClassificationPipeline - __call__ - all ### ZeroShotImageClassificationPipeline [[transformers.ZeroShotImageClassificationPipeline]] [[autodoc]] ZeroShotImageClassificationPipeline - __call__ - all ### ZeroShotObjectDetectionPipeline [[transformers.ZeroShotObjectDetectionPipeline]] [[autodoc]] ZeroShotObjectDetectionPipeline - __call__ - all ## 자연어 처리 [[natural-language-processing]] 자연어 처리 작업에 사용할 수 있는 파이프라인은 다음과 같습니다. 
### FillMaskPipeline [[transformers.FillMaskPipeline]] [[autodoc]] FillMaskPipeline - __call__ - all ### QuestionAnsweringPipeline [[transformers.QuestionAnsweringPipeline]] [[autodoc]] QuestionAnsweringPipeline - __call__ - all ### SummarizationPipeline [[transformers.SummarizationPipeline]] [[autodoc]] SummarizationPipeline - __call__ - all ### TableQuestionAnsweringPipeline [[transformers.TableQuestionAnsweringPipeline]] [[autodoc]] TableQuestionAnsweringPipeline - __call__ ### TextClassificationPipeline [[transformers.TextClassificationPipeline]] [[autodoc]] TextClassificationPipeline - __call__ - all ### TextGenerationPipeline [[transformers.TextGenerationPipeline]] [[autodoc]] TextGenerationPipeline - __call__ - all ### Text2TextGenerationPipeline [[transformers.Text2TextGenerationPipeline]] [[autodoc]] Text2TextGenerationPipeline - __call__ - all ### TokenClassificationPipeline [[transformers.TokenClassificationPipeline]] [[autodoc]] TokenClassificationPipeline - __call__ - all ### TranslationPipeline [[transformers.TranslationPipeline]] [[autodoc]] TranslationPipeline - __call__ - all ### ZeroShotClassificationPipeline [[transformers.ZeroShotClassificationPipeline]] [[autodoc]] ZeroShotClassificationPipeline - __call__ - all ## 멀티모달 [[multimodal]] 멀티모달 작업에 사용할 수 있는 파이프라인은 다음과 같습니다. ### DocumentQuestionAnsweringPipeline [[transformers.DocumentQuestionAnsweringPipeline]] [[autodoc]] DocumentQuestionAnsweringPipeline - __call__ - all ### FeatureExtractionPipeline [[transformers.FeatureExtractionPipeline]] [[autodoc]] FeatureExtractionPipeline - __call__ - all ### ImageFeatureExtractionPipeline [[transformers.ImageFeatureExtractionPipeline]] [[autodoc]] ImageFeatureExtractionPipeline - __call__ - all ### ImageToTextPipeline [[transformers.ImageToTextPipeline]] [[autodoc]] ImageToTextPipeline - __call__ - all ### ImageTextToTextPipeline [[transformers.ImageTextToTextPipeline]] [[autodoc]] ImageTextToTextPipeline - __call__ - all ### MaskGenerationPipeline [[transformers.MaskGenerationPipeline]] [[autodoc]] MaskGenerationPipeline - __call__ - all ### VisualQuestionAnsweringPipeline [[transformers.VisualQuestionAnsweringPipeline]] [[autodoc]] VisualQuestionAnsweringPipeline - __call__ - all ## Parent class: `Pipeline` [[transformers.Pipeline]] [[autodoc]] Pipeline
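Putting two of the sections above together, FP16 inference and batched streaming, a minimal sketch could look like this (the checkpoint and batch size are illustrative):

```py
# Minimal sketch: FP16 inference combined with batched streaming over a dataset.
import torch
import datasets
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

pipe = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",  # illustrative checkpoint
    device=0,
    dtype=torch.float16,  # FP16 as described above
)
dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised")

for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"):
    print(out)
```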
transformers/docs/source/ko/main_classes/pipelines.md/0
{ "file_path": "transformers/docs/source/ko/main_classes/pipelines.md", "repo_id": "transformers", "token_count": 9346 }
374
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BioGPT [[biogpt]] ## 개요 [[overview]] BioGPT는 Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon, Tie-Yan Liu에 의해 [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) 에서 제안된 모델입니다. BioGPT는 생물의학 텍스트 생성과 마이닝을 위해 도메인에 특화된 생성형 사전 학습 트랜스포머 언어 모델입니다. BioGPT는 트랜스포머 언어 모델 구조를 따르며, 1,500만 개의 PubMed 초록을 이용해 처음부터 학습되었습니다. 논문의 초록은 다음과 같습니다: *생물의학 분야에서 사전 학습된 언어 모델은 일반 자연어 처리 분야에서의 성공에 영감을 받아 점점 더 많은 주목을 받고 있습니다. 일반 언어 분야에서 사전 학습된 언어 모델의 두 가지 주요 계통인 BERT(및 그 변형)와 GPT(및 그 변형) 중 첫 번째는 생물의학 분야에서 BioBERT와 PubMedBERT와 같이 광범위하게 연구되었습니다. 이들은 다양한 분류 기반의 생물의학 작업에서 큰 성공을 거두었지만, 생성 능력의 부족은 그들의 적용 범위를 제한했습니다. 본 논문에서는 대규모 생물의학 문헌을 사전 학습한 도메인 특화 생성형 트랜스포머 언어 모델인 BioGPT를 제안합니다. 우리는 6개의 생물의학 자연어 처리 작업에서 BioGPT를 평가한 결과, 대부분의 작업에서 이전 모델보다 우수한 성능을 보였습니다. 특히, BC5CDR, KD-DTI, DDI 엔드-투-엔드 관계 추출 작업에서 각각 44.98%, 38.42%, 40.76%의 F1 점수를 기록하였으며, PubMedQA에서 78.2%의 정확도를 달성해 새로운 기록을 세웠습니다. 또한 텍스트 생성에 대한 사례 연구는 생물의학 용어에 대한 유창한 설명을 생성하는 데 있어 BioGPT의 장점을 더욱 입증했습니다.* 이 모델은 [kamalkraj](https://huggingface.co/kamalkraj)에 의해 기여되었습니다. 원본 코드는 [여기](https://github.com/microsoft/BioGPT)에서 찾을 수 있습니다. ## 사용 팁 [[usage-tips]] - BioGPT는 절대적 위치 임베딩(absolute position embedding)을 사용하므로, 입력을 왼쪽이 아닌 오른쪽에서 패딩하는 것이 권장됩니다. - BioGPT는 인과적 언어 모델링(Casual Langague Modeling, CLM) 목표로 학습되었기 때문에, 다음 토큰을 예측하는 데 강력한 성능을 보입니다. 이 기능을 활용하여 BioGPT는 구문적으로 일관된 텍스트를 생성할 수 있으며, 예시 스크립트 `run_generation.py`에서 이를 확인할 수 있습니다. - 이 모델은 `past_key_values`(PyTorch 용)를 입력으로 받을 수 있는데, 이는 이전에 계산된 키/값 어텐션 쌍입니다. 이 값을 사용하면 텍스트 생성 중 이미 계산된 값을 다시 계산하지 않도록 할 수 있습니다. PyTorch에서 `past_key_values` 인수는 BioGptForCausalLM.forward() 메소드에서 자세히 설명되어 있습니다. ### Scaled Dot Product Attention(SDPA) 사용 [[using-scaled-dot-product-attention-sdpa]] PyTorch는 `torch.nn.functional`의 일부로 스케일된 점곱 어텐션(SDPA) 연산자를 기본적으로 포함합니다. 이 함수는 입력과 사용 중인 하드웨어에 따라 여러 구현을 적용할 수 있습니다. 자세한 내용은 [공식 문서](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) 또는 [GPU 추론](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) 페이지를 참조하세요. `torch>=2.1.1`에서 구현이 가능한 경우 SDPA는 기본적으로 사용되며, `attn_implementation="sdpa"`를 `from_pretrained()`에서 설정하여 SDPA 사용을 명시적으로 요청할 수 있습니다. ``` from transformers import BioGptForCausalLM model = BioGptForCausalLM.from_pretrained("microsoft/biogpt", attn_implementation="sdpa", dtype=torch.float16) ``` NVIDIA GeForce RTX 2060-8GB, PyTorch 2.3.1, Ubuntu 20.04 환경에서 `float16` 및 CausalLM 헤드가 있는 `microsoft/biogpt` 모델로 로컬 벤치마크를 수행한 결과, 훈련 중 다음과 같은 속도 향상을 확인했습니다. 최적의 속도 향상을 위해 모델을 반정밀도(예: `torch.float16` 또는 `torch.bfloat16`)로 로드하는 것이 좋습니다. 
| num_training_steps | batch_size | seq_len | is cuda | Time per batch (eager - s) | Time per batch (sdpa - s) | Speedup (%) | Eager peak mem (MB) | sdpa peak mem (MB) | Mem saving (%) | |--------------------|------------|---------|---------|----------------------------|---------------------------|-------------|---------------------|--------------------|----------------| | 100 | 1 | 128 | False | 0.038 | 0.031 | 21.301 | 1601.862 | 1601.497 | 0.023 | | 100 | 1 | 256 | False | 0.039 | 0.034 | 15.084 | 1624.944 | 1625.296 | -0.022 | | 100 | 2 | 128 | False | 0.039 | 0.033 | 16.820 | 1624.567 | 1625.296 | -0.045 | | 100 | 2 | 256 | False | 0.065 | 0.059 | 10.255 | 1672.164 | 1672.164 | 0.000 | | 100 | 4 | 128 | False | 0.062 | 0.058 | 6.998 | 1671.435 | 1672.164 | -0.044 | | 100 | 4 | 256 | False | 0.113 | 0.100 | 13.316 | 2350.179 | 1848.435 | 27.144 | | 100 | 8 | 128 | False | 0.107 | 0.098 | 9.883 | 2098.521 | 1848.435 | 13.530 | | 100 | 8 | 256 | False | 0.222 | 0.196 | 13.413 | 3989.980 | 2986.492 | 33.601 | NVIDIA GeForce RTX 2060-8GB, PyTorch 2.3.1, Ubuntu 20.04 환경에서 `float16` 및 AutoModel 헤드가 있는 `microsoft/biogpt` 모델로 추론 중 다음과 같은 속도 향상을 확인했습니다. | num_batches | batch_size | seq_len | is cuda | is half | use mask | Per token latency eager (ms) | Per token latency SDPA (ms) | Speedup (%) | Mem eager (MB) | Mem BT (MB) | Mem saved (%) | |-------------|------------|---------|---------|---------|----------|------------------------------|-----------------------------|-------------|----------------|--------------|---------------| | 50 | 1 | 64 | True | True | True | 0.115 | 0.098 | 17.392 | 716.998 | 716.998 | 0.000 | | 50 | 1 | 128 | True | True | True | 0.115 | 0.093 | 24.640 | 730.916 | 730.916 | 0.000 | | 50 | 2 | 64 | True | True | True | 0.114 | 0.096 | 19.204 | 730.900 | 730.900 | 0.000 | | 50 | 2 | 128 | True | True | True | 0.117 | 0.095 | 23.529 | 759.262 | 759.262 | 0.000 | | 50 | 4 | 64 | True | True | True | 0.113 | 0.096 | 18.325 | 759.229 | 759.229 | 0.000 | | 50 | 4 | 128 | True | True | True | 0.186 | 0.178 | 4.289 | 816.478 | 816.478 | 0.000 | ## 리소스 [[resources]] - [인과적 언어 모델링 작업 가이드](../tasks/language_modeling) ## BioGptConfig [[transformers.BioGptConfig]] [[autodoc]] BioGptConfig ## BioGptTokenizer [[transformers.BioGptTokenizer]] [[autodoc]] BioGptTokenizer - save_vocabulary ## BioGptModel [[transformers.BioGptModel]] [[autodoc]] BioGptModel - forward ## BioGptForCausalLM [[transformers.BioGptForCausalLM]] [[autodoc]] BioGptForCausalLM - forward ## BioGptForTokenClassification [[transformers.BioGptForTokenClassification]] [[autodoc]] BioGptForTokenClassification - forward ## BioGptForSequenceClassification [[transformers.BioGptForSequenceClassification]] [[autodoc]] BioGptForSequenceClassification - forward
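As a complement to the loading snippet above, a minimal generation sketch (the prompt and decoding settings are illustrative):

```py
# Minimal sketch: greedy generation with the SDPA-enabled BioGPT checkpoint.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained(
    "microsoft/biogpt", attn_implementation="sdpa", dtype=torch.float16
).to("cuda")

inputs = tokenizer("COVID-19 is", return_tensors="pt").to("cuda")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```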
transformers/docs/source/ko/model_doc/biogpt.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/biogpt.md", "repo_id": "transformers", "token_count": 6060 }
375
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Gemma2 [[gemma2]] ## 개요 [[overview]] Gemma2 모델은 Google의 Gemma2 팀이 작성한 [Gemma2: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/google-gemma-2/)에서 제안되었습니다. 파라미터 크기가 각각 90억(9B)과 270억(27B)인 두 가지 Gemma2 모델이 출시되었습니다. 블로그 게시물의 초록은 다음과 같습니다: *이제 우리는 전 세계의 연구자와 개발자들에게 Gemma 2를 공식적으로 출시합니다. 90억(9B)과 270억(27B) 파라미터 크기로 제공되는 Gemma 2는 1세대보다 더 높은 성능과 추론 효율성을 제공하며, 상당한 안전성 향상을 포함하고 있습니다. 사실 270억 규모의 모델은 크기가 두 배 이상인 모델과 비교해도 경쟁력 있는 대안을 제공하며, 이는 작년 12월까지만 해도 독점 모델에서만 가능했던 성능을 제공합니다.* 팁: - 원본 체크포인트는 변환 스크립트 `src/transformers/models/Gemma2/convert_Gemma2_weights_to_hf.py`를 사용하여 변환할 수 있습니다. 이 모델은 [Arthur Zucker](https://huggingface.co/ArthurZ), [Pedro Cuenca](https://huggingface.co/pcuenq), [Tom Arsen]()이 기여했습니다. ## Gemma2Config [[transformers.Gemma2Config]] [[autodoc]] Gemma2Config ## Gemma2Model [[transformers.Gemma2Model]] [[autodoc]] Gemma2Model - forward ## Gemma2ForCausalLM [[transformers.Gemma2ForCausalLM]] [[autodoc]] Gemma2ForCausalLM - forward ## Gemma2ForSequenceClassification [[transformers.Gemma2ForSequenceClassification]] [[autodoc]] Gemma2ForSequenceClassification - forward ## Gemma2ForTokenClassification [[transformers.Gemma2ForTokenClassification]] [[autodoc]] Gemma2ForTokenClassification - forward
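A minimal usage sketch: `google/gemma-2-9b-it` is one of the released Gemma 2 checkpoints, and the other sizes mentioned above work the same way.

```py
# Minimal sketch: text generation with a Gemma 2 checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2-9b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```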
transformers/docs/source/ko/model_doc/gemma2.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/gemma2.md", "repo_id": "transformers", "token_count": 1279 }
376
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PaliGemma[[paligemma]] ## 개요[[overview]] PaliGemma 모델은 구글이 제안한 [PaliGemma – Google의 최첨단 오픈 비전 언어 모델](https://huggingface.co/blog/paligemma)에서 소개 되었습니다. PaliGemma는 [SigLIP](siglip) 비전 인코더와 [Gemma](gemma) 언어 인코더로 구성된 3B 규모의 비전-언어 모델로, 두 인코더가 멀티모달 선형 프로젝션으로 연결되어 있습니다. 이 모델은 이미지를 고정된 수의 VIT토큰으로 분할하고 이를 선택적 프롬프트 앞에 추가 하며, 모든 이미지 토큰과 입력 텍스트 토큰에 대해 전체 블록 어텐션을 사용하는 특징을 가지고 있습니다. PaliGemma는 224x224, 448x448, 896x896의 3가지 해상도로 제공되며, 3개의 기본 모델과 55개의 다양한 작업에 대해 미세 조정된 버전, 그리고 2개의 혼합 모델이 있습니다. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/paligemma/paligemma_arch.png" alt="drawing" width="600"/> <small> PaliGemma 아키텍처 <a href="https://huggingface.co/blog/paligemma">블로그 포스트.</a> </small> 이 모델은 [Molbap](https://huggingface.co/Molbap)에 의해 기여 되었습니다. ## 사용 팁[[usage-tips]] PaliGemma의 추론은 다음처럼 수행됩니다: ```python from transformers import AutoProcessor, PaliGemmaForConditionalGeneration model_id = "google/paligemma-3b-mix-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) prompt = "What is on the flower?" image_file = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true" raw_image = Image.open(requests.get(image_file, stream=True).raw) inputs = processor(raw_image, prompt, return_tensors="pt") output = model.generate(**inputs, max_new_tokens=20) print(processor.decode(output[0], skip_special_tokens=True)[len(prompt):]) ``` - PaliGemma는 대화용으로 설계되지 않았으며, 특정 사용 사례에 대해 미세 조정할 때 가장 잘 작동합니다. PaliGemma를 미세 조정할 수 있는 몇 가지 하위 작업에는 이미지 캡셔닝, 시각적 질문 답변(VQA), 오브젝트 디텍션, 참조 표현 분할 및 문서 이해가 포함됩니다. - 모델에 필요한 이미지, 텍스트 및 선택적 레이블을 준비하는데 `PaliGemmaProcessor`를 사용할 수 있습니다. PaliGemma 모델을 미세 조정할 때는, 프로세서에 `suffix`인자를 전달하여 다음 처럼 모델의 `labels`를 생성할 수 있습니다: ```python prompt = "What is on the flower?" answer = "a bee" inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt") ``` ## 자료[[resources]] PaliGemma를 시작하는 데 도움이 되는 Hugging Face와 community 자료 목록(🌎로 표시됨) 입니다.여기에 포함될 자료를 제출하고 싶으시다면 PR(Pull Request)를 열어주세요. 리뷰 해드리겠습니다! 자료는 기존 자료를 복제하는 대신 새로운 내용을 담고 있어야 합니다. - PaliGemma의 모든 기능을 소개하는 블로그 포스트는 [이곳](https://huggingface.co/blog/paligemma)에서 찾을 수 있습니다. 🌎 - Trainer API를 사용하여 VQA(Visual Question Answering)를 위해 PaliGemma를 미세 조정하는 방법과 추론에 대한 데모 노트북은 [이곳](https://github.com/huggingface/notebooks/tree/main/examples/paligemma)에서 찾을 수 있습니다. 🌎 - 사용자 정의 데이터셋(영수증 이미지 -> JSON)에 대해 PaliGemma를 미세 조정하는 방법과 추론에 대한 데모 노트북은 [이곳](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/PaliGemma)에서 찾을 수 있습니다. 
🌎 ## PaliGemmaConfig[[transformers.PaliGemmaConfig]] [[autodoc]] PaliGemmaConfig ## PaliGemmaProcessor[[transformers.PaliGemmaProcessor]] [[autodoc]] PaliGemmaProcessor ## PaliGemmaForConditionalGeneration[[transformers.PaliGemmaForConditionalGeneration]] [[autodoc]] PaliGemmaForConditionalGeneration - forward
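Building on the `suffix` example above, here is a minimal sketch of how those labels are consumed during fine-tuning: a single forward pass returns the loss. `processor`, `model`, `raw_image`, `prompt`, and `answer` are the objects created earlier on this page.

```py
# Minimal sketch: with `suffix` set, the processor also returns `labels`,
# so one forward pass yields the loss used in a fine-tuning loop.
inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt")
outputs = model(**inputs)
print(outputs.loss)  # scalar loss computed over the suffix tokens
```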
transformers/docs/source/ko/model_doc/paligemma.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/paligemma.md", "repo_id": "transformers", "token_count": 2814 }
377
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Whisper [[whisper]] ## 개요 [[overview]] Whisper 모델은 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever에 의해 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf)에서 제안되었습니다. 논문의 초록은 다음과 같습니다: *우리는 인터넷에서 대량의 오디오를 글로 옮긴 것을 예측하도록 간단히 훈련된 음성 처리 시스템의 성능을 연구합니다. 68만 시간의 다국어 및 다중 작업 지도(multitask supervision)에 확장했을 때, 결과 모델은 표준 벤치마크에 잘 일반화되며, 미세 조정이 필요 없는 제로샷 전송 설정에서 이전의 완전히 지도된(fully-supervised) 결과와 경쟁할 수 있는 경우가 많습니다. 사람과 비교하면, 이 모델은 사람의 정확도와 견고성에 근접합니다. 우리는 강력한 음성 처리를 위한 추가 작업의 기반이 될 모델과 추론 코드를 공개합니다.* 팁: - 이 모델은 일반적으로 별도의 미세 조정 없이도 잘 작동합니다. - 아키텍처는 고전적인 인코더-디코더 아키텍처를 따르기 때문에, 추론을 위해 [`~generation.GenerationMixin.generate`] 함수를 사용합니다. - 현재 추론은 짧은 형식에만 구현되어 있으며, 오디오는 30초 미만의 세그먼트로 미리 분할되어야 합니다. 타임스탬프를 포함한 긴 형식에 대한 추론은 향후 릴리스에서 구현될 예정입니다. - [`WhisperProcessor`]를 사용하여 모델에 사용할 오디오를 준비하고, 예측된 ID를 텍스트로 디코딩할 수 있습니다. - 모델과 프로세서를 변환하려면 다음을 사용하는 것이 좋습니다: ```bash python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_preprocessor True ``` 스크립트는 OpenAI 체크포인트에서 필요한 모든 매개변수를 자동으로 결정합니다. OpenAI 변환을 수행하려면 `tiktoken` 라이브러리를 설치해야 합니다. 라이브러리를 설치해야 OpenAI 토큰화기를 `tokenizers` 버전으로 변환할 수 있습니다. 이 모델은 [Arthur Zucker](https://huggingface.co/ArthurZ)에 의해 제공되었습니다. 이 모델의 Tensorflow 버전은 [amyeroberts](https://huggingface.co/amyeroberts)에 의해 제공되었습니다. 원본 코드는 [여기](https://github.com/openai/whisper)에서 찾을 수 있습니다. 
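A minimal short-form transcription sketch following the tips above (the audio is already shorter than 30 seconds and sampled at 16 kHz; the tiny checkpoint and the dummy LibriSpeech split are only for illustration):

```py
# Minimal sketch: short-form transcription with WhisperProcessor + generate().
import torch
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]

inputs = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt")
with torch.no_grad():
    predicted_ids = model.generate(inputs.input_features)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
```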
## WhisperConfig [[whisperconfig]] [[autodoc]] WhisperConfig ## WhisperTokenizer [[whispertokenizer]] [[autodoc]] WhisperTokenizer - set_prefix_tokens - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## WhisperTokenizerFast [[whispertokenizerfast]] [[autodoc]] WhisperTokenizerFast - set_prefix_tokens - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## WhisperFeatureExtractor [[whisperfeatureextractor]] [[autodoc]] WhisperFeatureExtractor - __call__ ## WhisperProcessor [[whisperprocessor]] [[autodoc]] WhisperProcessor - __call__ - from_pretrained - save_pretrained - batch_decode - decode ## WhisperModel [[whispermodel]] [[autodoc]] WhisperModel - forward - _mask_input_features ## WhisperForConditionalGeneration [[whisperforconditionalgeneration]] [[autodoc]] WhisperForConditionalGeneration - forward ## WhisperForAudioClassification [[whisperforaudioclassification]] [[autodoc]] WhisperForAudioClassification - forward ## TFWhisperModel [[tfwhispermodel]] [[autodoc]] TFWhisperModel - call ## TFWhisperForConditionalGeneration [[tfwhisperforconditionalgeneration]] [[autodoc]] TFWhisperForConditionalGeneration - call ## FlaxWhisperModel [[flaxwhispermodel]] [[autodoc]] FlaxWhisperModel - __call__ ## FlaxWhisperForConditionalGeneration [[flaxwhisperforconditionalgeneration]] [[autodoc]] FlaxWhisperForConditionalGeneration - __call__ ## FlaxWhisperForAudioClassification [[flaxwhisperforaudioclassification]] [[autodoc]] FlaxWhisperForAudioClassification - __call__
transformers/docs/source/ko/model_doc/whisper.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/whisper.md", "repo_id": "transformers", "token_count": 2696 }
378
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 고정 길이 모델의 펄플렉서티(Perplexity)[[perplexity-of-fixedlength-models]] [[open-in-colab]] 펄플렉서티(Perplexity, PPL)는 가장 일반적인 언어 모델 평가지표 중 하나입니다. 자세히 알아보기 전에 이 평가지표는 고전적인 언어 모델(자기회귀 또는 인과적 언어 모델이라고도 함)에만 적용되며 BERT와 같은 마스킹된 언어 모델에는 잘 적용하지 않습니다 (BERT는 [summary of the models](../en/model_summary) 문서를 참고하세요). 펄플렉서티는 시퀀스의 음의 로그 우도(negative log-likelihood, NLL) 값의 평균에 지수(exponentiate)를 취한 값으로 정의됩니다. 토큰화된 시퀀스 \\(X = (x_0, x_1, \dots, x_t)\\) 가 있을 때, \\(X\\) 의 펄플렉서티는 아래 수식과 같이 구할 수 있습니다. $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ \\(\log p_\theta (x_i|x_{<i})\\) 는 모델에 i번째 이전까지 토큰이 주어졌을 때 i번째 토큰의 로그 우도값입니다. 직관적으로 말뭉치에서 지정된 토큰 집합을 균일하게 예측하는 모델의 능력에 대한 평가로 생각할 수 있습니다. 중요한 점은 토큰화 과정이 모델의 펄플렉서티에 직접적인 영향을 미치므로 서로 다른 모델을 비교할 때 항상 이를 고려해야 합니다. 이는 데이터와 모델 예측 간의 cross-entropy 값에 지수를 취한 것과 동일합니다. 펄플렉서티와 문자당 비트 수(BPC) 및 데이터 압축과의 관계에 대해 더 직관적인 이해를 원하신다면 다음 글 [fantastic blog post on The Gradient](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/)을 확인하세요. ## 고정 길이 모델의 펄플렉서티(PPL) 계산하기[[calculating-ppl-with-fixedlength-models]] 모델의 컨텍스트 크기가 정해져있지 않다면, 아래와 같이 시퀀스를 자동 회귀적으로 분해하고 각 단계에서 선행 하는 전체 시퀀스를 조건부 확률에 넣어 모델의 펄플렉서티를 계산할 것입니다. <img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> 그러나 모델의 근사치를 구할 때는 일반적으로 모델이 처리할 수 있는 토큰 수에 제한이 있습니다. 예를 들어, 가장 큰 버전의 [GPT-2](model_doc/gpt2)는 토큰의 길이가 1024로 고정되어 있습니다. 따라서 \\(t\\) 가 1024보다 큰 경우에 \\(p_\theta(x_t|x_{<t})\\) 을 계산할 수 없습니다. 대신 시퀀스는 일반적으로 모델의 최대 입력 크기와 동일한 길이는 가지는 부분 시퀀스로 쪼갭니다. 만약 모델의 최대 입력 길이가 \\(k\\) 라면, 토큰 \\(x_t\\) 의 우도 값을 계산할 때 이전 토큰을 모두 사용하지 않고, \\(k-1\\) 토큰까지 사용해 대략적인 우도 값을 추정합니다. 모델의 시퀀스에 대한 펄플렉서티를 계산할 때, 수월하지만 차선책은 시퀀스를 청크로 쪼개고 분해된 각 부분의 로그 우도 값을 독립적으로 합산하는 것입니다. <img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> 이 방법은 각 부분의 펄플렉서티를 한 번의 포워드 패스로 계산할 수 있어 빠르지만 일반적으로 더 높은(더 나쁜) PPL을 산출합니다. 왜냐하면 대부분의 예측 단계에서 모델의 컨텍스트가 적기 때문입니다. 대신, 고정 길이 모델의 PPL은 슬라이딩 윈도우 전략으로 평가해야 합니다. 이 전략에는 컨텍스트 윈도우을 반복적으로 슬라이딩해 모델이 각 예측을 수행할 때 더 많은 컨텍스트를 갖도록 하는 작업이 포함됩니다. <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> 이는 시퀀스 확률의 실제 분해에 더 가까운 근사치이며 일반적으로 더 유리한 점수를 산출합니다. 단점은 말뭉치의 각 토큰에 대해 별도의 포워드 패스가 필요하다는 것입니다. 현실적으로 좋은 절충안은 한 번에 한 토큰씩 슬라이딩하는 것이 아니라 더 큰 간격으로 컨텍스트를 이동하는 스트라이드가 적용된 슬라이딩 윈도우을 사용하는 것입니다. 이렇게 하면 계산을 훨씬 더 빠르게 진행하면서도 모델에 각 단계에서 예측을 수행할 수 있는 긴 컨텍스트를 제공할 수 있습니다. ## 예제: 🤗 Transformers에서 GPT-2로 펄플렉서티(perplexity) 계산하기[[example-calculating-perplexity-with-gpt2-in-transformers]] 이제 GPT-2로 위의 과정을 시연해 보겠습니다. 
```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` WikiText-2 데이터 세트를 가져오고 몇 가지 슬라이딩 윈도우 전략을 사용해 펄플렉서티를 계산해보겠습니다. 이 데이터 세트는 크기가 작고 포워드 패스 한 번만 수행하기 때문에 전체 데이터 세트를 메모리에 가져오고 인코딩할 수 있습니다. ```python from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt") ``` 🤗 Transformers를 사용하면 모델의 `labels`로 `input_ids`를 전달해 각 토큰에 대한 평균 음의 우도 값을 손실로 반환할 수 있습니다. 하지만 슬라이딩 윈도우 방식을 사용하면 각 반복마다 모델에 전달하는 토큰이 겹칩니다. 컨텍스트로 처리하는 토큰에 대한 로그 우도 값이 손실에 포함되는 것을 원하지 않기 때문에 이러한 토큰의 `input_ids`를 `-100`으로 설정하여 무시할 수 있습니다. 다음은 스트라이드(stride)를 `512`로 사용한 예시입니다. 즉, 모델이 한 토큰의 조건부 우도 값을 계산할 때 컨텍스트에 최소한 512개의 토큰이 포함되어있다는 의미입니다 (해당 토큰 앞에 512개의 토큰이 있는 경우). ```python import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 seq_len = encodings.input_ids.size(1) nlls = [] prev_end_loc = 0 for begin_loc in tqdm(range(0, seq_len, stride)): end_loc = min(begin_loc + max_length, seq_len) trg_len = end_loc - prev_end_loc # 마지막 루프의 스트라이드 값과 다를 수 있음 input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) # 손실은 모든 유효한 레이블에 대한 평균값을 구하는 교차 엔트로피(cross entropy)로 계산됩니다. # 나이브 베이지안 모델은 내부적으로 레이블을 왼쪽으로 1개씩 밀기 때문에, (타켓 - 1)개 만큼의 레이블에 대해 손실을 계산합니다. neg_log_likelihood = outputs.loss nlls.append(neg_log_likelihood) prev_end_loc = end_loc if end_loc == seq_len: break ppl = torch.exp(torch.stack(nlls).mean()) ``` 스트라이드를 최대 입력 길이와 동일하게 설정하면 위에서 설명한 차선책인 비슬라이딩 윈도우 전략과 동일합니다. 일반적으로 스트라이드가 작을수록 모델이 각 예측을 할 때 더 많은 컨텍스트를 볼 수 있게 되어 펄플렉서티 값이 좋아집니다. 위의 계산을 토큰이 겹치지 않도록 `stride = 1024`로 설정하면 PPL은 `19.44`로 GPT-2 논문에서 보고된 `19.93`과 거의 동일합니다. `stride = 512`로 슬라이딩 윈도우 전략을 사용하면 PPL은 `16.45`로 떨어집니다. 이는 더 좋은 점수일 뿐만 아니라 시퀀스 확률의 실제 자동 회귀 분해에 더 가까운 방식으로 계산됩니다.
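To reproduce the stride comparison quoted above, the loop can be wrapped in a small helper. The sketch below reuses the `model`, `encodings`, `seq_len`, `max_length`, and `device` objects already defined:

```py
# Minimal sketch: the same sliding-window loop as above, parametrized by stride,
# so PPL at stride=1024 and stride=512 can be compared directly.
def perplexity_with_stride(stride: int) -> float:
    nlls, prev_end_loc = [], 0
    for begin_loc in range(0, seq_len, stride):
        end_loc = min(begin_loc + max_length, seq_len)
        trg_len = end_loc - prev_end_loc  # may differ from stride on the last loop
        input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
        target_ids = input_ids.clone()
        target_ids[:, :-trg_len] = -100  # mask tokens that are context only
        with torch.no_grad():
            nlls.append(model(input_ids, labels=target_ids).loss)
        prev_end_loc = end_loc
        if end_loc == seq_len:
            break
    return torch.exp(torch.stack(nlls).mean()).item()

# print(perplexity_with_stride(1024))  # ≈ 19.44 in the run described above
# print(perplexity_with_stride(512))   # ≈ 16.45
```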
transformers/docs/source/ko/perplexity.md/0
{ "file_path": "transformers/docs/source/ko/perplexity.md", "repo_id": "transformers", "token_count": 6268 }
379
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 자동 음성 인식[[automatic-speech-recognition]] [[open-in-colab]] <Youtube id="TksaY_FDgnk"/> 자동 음성 인식(Automatic Speech Recognition, ASR)은 음성 신호를 텍스트로 변환하여 음성 입력 시퀀스를 텍스트 출력에 매핑합니다. Siri와 Alexa와 같은 가상 어시스턴트는 ASR 모델을 사용하여 일상적으로 사용자를 돕고 있으며, 회의 중 라이브 캡션 및 메모 작성과 같은 유용한 사용자 친화적 응용 프로그램도 많이 있습니다. 이 가이드에서 소개할 내용은 아래와 같습니다: 1. [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트에서 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base)를 미세 조정하여 오디오를 텍스트로 변환합니다. 2. 미세 조정한 모델을 추론에 사용합니다. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/automatic-speech-recognition)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate jiwer ``` Hugging Face 계정에 로그인하면 모델을 업로드하고 커뮤니티에 공유할 수 있습니다. 토큰을 입력하여 로그인하세요. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## MInDS-14 데이터 세트 가져오기[[load-minds-14-dataset]] 먼저, 🤗 Datasets 라이브러리에서 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트의 일부분을 가져오세요. 이렇게 하면 전체 데이터 세트에 대한 훈련에 시간을 들이기 전에 모든 것이 작동하는지 실험하고 검증할 수 있습니다. ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]") ``` [`~Dataset.train_test_split`] 메소드를 사용하여 데이터 세트의 `train`을 훈련 세트와 테스트 세트로 나누세요: ```py >>> minds = minds.train_test_split(test_size=0.2) ``` 그리고 데이터 세트를 확인하세요: ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 16 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 4 }) }) ``` 데이터 세트에는 `lang_id`와 `english_transcription`과 같은 유용한 정보가 많이 포함되어 있지만, 이 가이드에서는 `audio`와 `transcription`에 초점을 맞출 것입니다. 다른 열은 [`~datasets.Dataset.remove_columns`] 메소드를 사용하여 제거하세요: ```py >>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"]) ``` 예시를 다시 한번 확인해보세요: ```py >>> minds["train"][0] {'audio': {'array': array([-0.00024414, 0. , 0. 
, ..., 0.00024414, 0.00024414, 0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 8000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 두 개의 필드가 있습니다: - `audio`: 오디오 파일을 가져오고 리샘플링하기 위해 호출해야 하는 음성 신호의 1차원 `array(배열)` - `transcription`: 목표 텍스트 ## 전처리[[preprocess]] 다음으로 오디오 신호를 처리하기 위한 Wav2Vec2 프로세서를 가져옵니다: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base") ``` MInDS-14 데이터 세트의 샘플링 레이트는 8000kHz이므로([데이터 세트 카드](https://huggingface.co/datasets/PolyAI/minds14)에서 확인), 사전 훈련된 Wav2Vec2 모델을 사용하려면 데이터 세트를 16000kHz로 리샘플링해야 합니다: ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ..., 2.78103951e-04, 2.38446111e-04, 1.18740834e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 16000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 위의 'transcription'에서 볼 수 있듯이 텍스트는 대문자와 소문자가 섞여 있습니다. Wav2Vec2 토크나이저는 대문자 문자에 대해서만 훈련되어 있으므로 텍스트가 토크나이저의 어휘와 일치하는지 확인해야 합니다: ```py >>> def uppercase(example): ... return {"transcription": example["transcription"].upper()} >>> minds = minds.map(uppercase) ``` 이제 다음 작업을 수행할 전처리 함수를 만들어보겠습니다: 1. `audio` 열을 호출하여 오디오 파일을 가져오고 리샘플링합니다. 2. 오디오 파일에서 `input_values`를 추출하고 프로세서로 `transcription` 열을 토큰화합니다. ```py >>> def prepare_dataset(batch): ... audio = batch["audio"] ... batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"]) ... batch["input_length"] = len(batch["input_values"][0]) ... return batch ``` 전체 데이터 세트에 전처리 함수를 적용하려면 🤗 Datasets [`~datasets.Dataset.map`] 함수를 사용하세요. `num_proc` 매개변수를 사용하여 프로세스 수를 늘리면 `map`의 속도를 높일 수 있습니다. [`~datasets.Dataset.remove_columns`] 메소드를 사용하여 필요하지 않은 열을 제거하세요: ```py >>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4) ``` 🤗 Transformers에는 자동 음성 인식용 데이터 콜레이터가 없으므로 예제 배치를 생성하려면 [`DataCollatorWithPadding`]을 조정해야 합니다. 이렇게 하면 데이터 콜레이터는 텍스트와 레이블을 배치에서 가장 긴 요소의 길이에 동적으로 패딩하여 길이를 균일하게 합니다. `tokenizer` 함수에서 `padding=True`를 설정하여 텍스트를 패딩할 수 있지만, 동적 패딩이 더 효율적입니다. 다른 데이터 콜레이터와 달리 이 특정 데이터 콜레이터는 `input_values`와 `labels`에 대해 다른 패딩 방법을 적용해야 합니다. ```py >>> import torch >>> from dataclasses import dataclass, field >>> from typing import Any, Dict, List, Optional, Union >>> @dataclass ... class DataCollatorCTCWithPadding: ... processor: AutoProcessor ... padding: Union[bool, str] = "longest" ... def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]: ... # 입력과 레이블을 분할합니다 ... # 길이가 다르고, 각각 다른 패딩 방법을 사용해야 하기 때문입니다 ... 
input_features = [{"input_values": feature["input_values"][0]} for feature in features] ... label_features = [{"input_ids": feature["labels"]} for feature in features] ... batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt") ... labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt") ... # 패딩에 대해 손실을 적용하지 않도록 -100으로 대체합니다 ... labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) ... batch["labels"] = labels ... return batch ``` 이제 `DataCollatorForCTCWithPadding`을 인스턴스화합니다: ```py >>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest") ``` ## 평가하기[[evaluate]] 훈련 중에 평가 지표를 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리를 사용하면 평가 방법을 빠르게 불러올 수 있습니다. 이 작업에서는 [단어 오류율(Word Error Rate, WER)](https://huggingface.co/spaces/evaluate-metric/wer) 평가 지표를 가져옵니다. (평가 지표를 불러오고 계산하는 방법은 🤗 Evaluate [둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하세요): ```py >>> import evaluate >>> wer = evaluate.load("wer") ``` 그런 다음 예측값과 레이블을 [`~evaluate.EvaluationModule.compute`]에 전달하여 WER을 계산하는 함수를 만듭니다: ```py >>> import numpy as np >>> def compute_metrics(pred): ... pred_logits = pred.predictions ... pred_ids = np.argmax(pred_logits, axis=-1) ... pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id ... pred_str = processor.batch_decode(pred_ids) ... label_str = processor.batch_decode(pred.label_ids, group_tokens=False) ... wer = wer.compute(predictions=pred_str, references=label_str) ... return {"wer": wer} ``` 이제 `compute_metrics` 함수를 사용할 준비가 되었으며, 훈련을 설정할 때 이 함수로 되돌아올 것입니다. ## 훈련하기[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]로 모델을 미세 조정하는 것이 익숙하지 않다면, [여기](../training#train-with-pytorch-trainer)에서 기본 튜토리얼을 확인해보세요! </Tip> 이제 모델 훈련을 시작할 준비가 되었습니다! [`AutoModelForCTC`]로 Wav2Vec2를 가져오세요. `ctc_loss_reduction` 매개변수로 CTC 손실에 적용할 축소(reduction) 방법을 지정하세요. 기본값인 합계 대신 평균을 사용하는 것이 더 좋은 경우가 많습니다: ```py >>> from transformers import AutoModelForCTC, TrainingArguments, Trainer >>> model = AutoModelForCTC.from_pretrained( ... "facebook/wav2vec2-base", ... ctc_loss_reduction="mean", ... pad_token_id=processor.tokenizer.pad_token_id, ... ) ``` 이제 세 단계만 남았습니다: 1. [`TrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. `output_dir`은 모델을 저장할 경로를 지정하는 유일한 필수 매개변수입니다. `push_to_hub=True`를 설정하여 모델을 Hub에 업로드 할 수 있습니다(모델을 업로드하려면 Hugging Face에 로그인해야 합니다). [`Trainer`]는 각 에폭마다 WER을 평가하고 훈련 체크포인트를 저장합니다. 2. 모델, 데이터 세트, 토크나이저, 데이터 콜레이터, `compute_metrics` 함수와 함께 [`Trainer`]에 훈련 인수를 전달하세요. 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정하세요. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_asr_mind_model", ... per_device_train_batch_size=8, ... gradient_accumulation_steps=2, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=2000, ... gradient_checkpointing=True, ... fp16=True, ... group_by_length=True, ... eval_strategy="steps", ... per_device_eval_batch_size=8, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... load_best_model_at_end=True, ... metric_for_best_model="wer", ... greater_is_better=False, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... processing_class=processor.feature_extractor, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... 
) >>> trainer.train() ``` 훈련이 완료되면 모두가 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메소드를 사용하여 모델을 Hub에 공유하세요: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> 자동 음성 인식을 위해 모델을 미세 조정하는 더 자세한 예제는 영어 자동 음성 인식을 위한 [블로그 포스트](https://huggingface.co/blog/fine-tune-wav2vec2-english)와 다국어 자동 음성 인식을 위한 [포스트](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)를 참조하세요. </Tip> ## 추론하기[[inference]] 좋아요, 이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다! 추론에 사용할 오디오 파일을 가져오세요. 필요한 경우 오디오 파일의 샘플링 비율을 모델의 샘플링 레이트에 맞게 리샘플링하는 것을 잊지 마세요! ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` 추론을 위해 미세 조정된 모델을 시험해보는 가장 간단한 방법은 [`pipeline`]을 사용하는 것입니다. 모델을 사용하여 자동 음성 인식을 위한 `pipeline`을 인스턴스화하고 오디오 파일을 전달하세요: ```py >>> from transformers import pipeline >>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model") >>> transcriber(audio_file) {'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'} ``` <Tip> 텍스트로 변환된 결과가 꽤 괜찮지만 더 좋을 수도 있습니다! 더 나은 결과를 얻으려면 더 많은 예제로 모델을 미세 조정하세요! </Tip> `pipeline`의 결과를 수동으로 재현할 수도 있습니다: <frameworkcontent> <pt> 오디오 파일과 텍스트를 전처리하고 PyTorch 텐서로 `input`을 반환할 프로세서를 가져오세요: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` 입력을 모델에 전달하고 로짓을 반환하세요: ```py >>> from transformers import AutoModelForCTC >>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 가장 높은 확률의 `input_ids`를 예측하고, 프로세서를 사용하여 예측된 `input_ids`를 다시 텍스트로 디코딩하세요: ```py >>> import torch >>> predicted_ids = torch.argmax(logits, dim=-1) >>> transcription = processor.batch_decode(predicted_ids) >>> transcription ['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'] ``` </pt> </frameworkcontent>
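수동으로 얻은 전사 결과가 실제 정답과 얼마나 차이 나는지 궁금하다면, 훈련할 때 사용했던 WER 지표로 간단히 비교해 볼 수 있습니다. 아래는 위에서 만든 `dataset`과 `transcription` 변수를 그대로 사용한다고 가정한 간단한 예시이며, 실제 값은 미세 조정한 모델에 따라 달라질 수 있습니다:

```py
>>> import evaluate

>>> wer = evaluate.load("wer")
>>> # 토크나이저가 대문자 어휘로 훈련되었으므로 정답 전사도 대문자로 변환합니다
>>> reference = dataset[0]["transcription"].upper()
>>> wer.compute(predictions=[transcription[0]], references=[reference])
```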
transformers/docs/source/ko/tasks/asr.md/0
{ "file_path": "transformers/docs/source/ko/tasks/asr.md", "repo_id": "transformers", "token_count": 9518 }
380
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 대규모 언어 모델(LLM) 프롬프팅 가이드 [[llm-prompting-guide]] [[open-in-colab]] Falcon, LLaMA 등의 대규모 언어 모델은 사전 훈련된 트랜스포머 모델로, 초기에는 주어진 입력 텍스트에 대해 다음 토큰을 예측하도록 훈련됩니다. 이들은 보통 수십억 개의 매개변수를 가지고 있으며, 장기간에 걸쳐 수조 개의 토큰으로 훈련됩니다. 그 결과, 이 모델들은 매우 강력하고 다재다능해져서, 자연어 프롬프트로 모델에 지시하여 다양한 자연어 처리 작업을 즉시 수행할 수 있습니다. 최적의 출력을 보장하기 위해 이러한 프롬프트를 설계하는 것을 흔히 "프롬프트 엔지니어링"이라고 합니다. 프롬프트 엔지니어링은 상당한 실험이 필요한 반복적인 과정입니다. 자연어는 프로그래밍 언어보다 훨씬 유연하고 표현력이 풍부하지만, 동시에 모호성을 초래할 수 있습니다. 또한, 자연어 프롬프트는 변화에 매우 민감합니다. 프롬프트의 사소한 수정만으로도 완전히 다른 출력이 나올 수 있습니다. 모든 경우에 적용할 수 있는 정확한 프롬프트 생성 공식은 없지만, 연구자들은 더 일관되게 최적의 결과를 얻는 데 도움이 되는 여러 가지 모범 사례를 개발했습니다. 이 가이드에서는 더 나은 대규모 언어 모델 프롬프트를 작성하고 다양한 자연어 처리 작업을 해결하는 데 도움이 되는 프롬프트 엔지니어링 모범 사례를 다룹니다: - [프롬프팅의 기초](#basics-of-prompting) - [대규모 언어 모델 프롬프팅의 모범 사례](#best-practices-of-llm-prompting) - [고급 프롬프팅 기법: 퓨샷(Few-shot) 프롬프팅과 생각의 사슬(Chain-of-thought, CoT) 기법](#advanced-prompting-techniques) - [프롬프팅 대신 미세 조정을 해야 하는 경우](#prompting-vs-fine-tuning) <Tip> 프롬프트 엔지니어링은 대규모 언어 모델 출력 최적화 과정의 일부일 뿐입니다. 또 다른 중요한 구성 요소는 최적의 텍스트 생성 전략을 선택하는 것입니다. 학습 가능한 매개변수를 수정하지 않고도 대규모 언어 모델이 텍스트를 생성하리 때 각각의 후속 토큰을 선택하는 방식을 사용자가 직접 정의할 수 있습니다. 텍스트 생성 매개변수를 조정함으로써 생성된 텍스트의 반복을 줄이고 더 일관되고 사람이 말하는 것 같은 텍스트를 만들 수 있습니다. 텍스트 생성 전략과 매개변수는 이 가이드의 범위를 벗어나지만, 다음 가이드에서 이러한 주제에 대해 자세히 알아볼 수 있습니다: * [대규모 언어 모델을 이용한 생성](../llm_tutorial) * [텍스트 생성 전략](../generation_strategies) </Tip> ## 프롬프팅의 기초 [[basics-of-prompting]] ### 모델의 유형 [[types-of-models]] 현대의 대부분의 대규모 언어 모델은 디코더만을 이용한 트랜스포머입니다. 예를 들어 [LLaMA](../model_doc/llama), [Llama2](../model_doc/llama2), [Falcon](../model_doc/falcon), [GPT2](../model_doc/gpt2) 등이 있습니다. 그러나 [Flan-T5](../model_doc/flan-t5)와 [BART](../model_doc/bart)와 같은 인코더-디코더 기반의 트랜스포머 대규모 언어 모델을 접할 수도 있습니다. 인코더-디코더 기반의 모델은 일반적으로 출력이 입력에 **크게** 의존하는 생성 작업에 사용됩니다. 예를 들어, 번역과 요약 작업에 사용됩니다. 디코더 전용 모델은 다른 모든 유형의 생성 작업에 사용됩니다. 파이프라인을 사용하여 대규모 언어 모델으로 텍스트를 생성할 때, 어떤 유형의 대규모 언어 모델을 사용하고 있는지 아는 것이 중요합니다. 왜냐하면 이들은 서로 다른 파이프라인을 사용하기 때문입니다. 디코더 전용 모델로 추론을 실행하려면 `text-generation` 파이프라인을 사용하세요: ```python >>> from transformers import pipeline >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> generator = pipeline('text-generation', model = 'openai-community/gpt2') >>> prompt = "Hello, I'm a language model" >>> generator(prompt, max_length = 30) [{'generated_text': "Hello, I'm a language model programmer so you can use some of my stuff. 
But you also need some sort of a C program to run."}] ``` 인코더-디코더로 추론을 실행하려면 `text2text-generation` 파이프라인을 사용하세요: ```python >>> text2text_generator = pipeline("text2text-generation", model = 'google/flan-t5-base') >>> prompt = "Translate from English to French: I'm very happy to see you" >>> text2text_generator(prompt) [{'generated_text': 'Je suis très heureuse de vous rencontrer.'}] ``` ### 기본 모델 vs 지시/채팅 모델 [[base-vs-instructchat-models]] 🤗 Hub에서 최근 사용 가능한 대부분의 대규모 언어 모델 체크포인트는 기본 버전과 지시(또는 채팅) 두 가지 버전이 제공됩니다. 예를 들어, [`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b)와 [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct)가 있습니다. 기본 모델은 초기 프롬프트가 주어졌을 때 텍스트를 완성하는 데 탁월하지만, 지시를 따라야 하거나 대화형 사용이 필요한 자연어 처리작업에는 이상적이지 않습니다. 이때 지시(채팅) 버전이 필요합니다. 이러한 체크포인트는 사전 훈련된 기본 버전을 지시사항과 대화 데이터로 추가 미세 조정한 결과입니다. 이 추가적인 미세 조정으로 인해 많은 자연어 처리 작업에 더 적합한 선택이 됩니다. [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct)를 사용하여 일반적인 자연어 처리 작업을 해결하는 데 사용할 수 있는 몇 가지 간단한 프롬프트를 살펴보겠습니다. ### 자연어 처리 작업 [[nlp-tasks]] 먼저, 환경을 설정해 보겠습니다: ```bash pip install -q transformers accelerate ``` 다음으로, 적절한 파이프라인("text-generation")을 사용하여 모델을 로드하겠습니다: ```python >>> from transformers import pipeline, AutoTokenizer >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> model = "tiiuae/falcon-7b-instruct" >>> tokenizer = AutoTokenizer.from_pretrained(model) >>> pipe = pipeline( ... "text-generation", ... model=model, ... tokenizer=tokenizer, ... dtype=torch.bfloat16, ... device_map="auto", ... ) ``` <Tip> Falcon 모델은 bfloat16 데이터 타입을 사용하여 훈련되었으므로, 같은 타입을 사용하는 것을 권장합니다. 이를 위해서는 최신 버전의 CUDA가 필요하며, 최신 그래픽 카드에서 가장 잘 작동합니다. </Tip> 이제 파이프라인을 통해 모델을 로드했으니, 프롬프트를 사용하여 자연어 처리 작업을 해결하는 방법을 살펴보겠습니다. #### 텍스트 분류 [[text-classification]] 텍스트 분류의 가장 일반적인 형태 중 하나는 감정 분석입니다. 이는 텍스트 시퀀스에 "긍정적", "부정적" 또는 "중립적"과 같은 레이블을 할당합니다. 주어진 텍스트(영화 리뷰)를 분류하도록 모델에 지시하는 프롬프트를 작성해 보겠습니다. 먼저 지시사항을 제공한 다음, 분류할 텍스트를 지정하겠습니다. 여기서 주목할 점은 단순히 거기서 끝내지 않고, 응답의 시작 부분인 `"Sentiment: "`을 추가한다는 것입니다: ```python >>> torch.manual_seed(0) >>> prompt = """Classify the text into neutral, negative or positive. ... Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. ... Sentiment: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Classify the text into neutral, negative or positive. Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. Sentiment: Positive ``` 결과적으로, 우리가 지시사항에서 제공한 목록에서 선택된 분류 레이블이 정확하게 포함되어 생성된 것을 확인할 수 있습니다! <Tip> 프롬프트 외에도 `max_new_tokens` 매개변수를 전달하는 것을 볼 수 있습니다. 이 매개변수는 모델이 생성할 토큰의 수를 제어하며, [텍스트 생성 전략](../generation_strategies) 가이드에서 배울 수 있는 여러 텍스트 생성 매개변수 중 하나입니다. </Tip> #### 개체명 인식 [[named-entity-recognition]] 개체명 인식(Named Entity Recognition, NER)은 텍스트에서 인물, 장소, 조직과 같은 명명된 개체를 찾는 작업입니다. 프롬프트의 지시사항을 수정하여 대규모 언어 모델이 이 작업을 수행하도록 해보겠습니다. 여기서는 `return_full_text = False`로 설정하여 출력에 프롬프트가 포함되지 않도록 하겠습니다: ```python >>> torch.manual_seed(1) # doctest: +IGNORE_RESULT >>> prompt = """Return a list of named entities in the text. ... Text: The Golden State Warriors are an American professional basketball team based in San Francisco. ... Named entities: ... """ >>> sequences = pipe( ... prompt, ... 
max_new_tokens=15, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") - Golden State Warriors - San Francisco ``` 보시다시피, 모델이 주어진 텍스트에서 두 개의 명명된 개체를 정확하게 식별했습니다. #### 번역 [[translation]] 대규모 언어 모델이 수행할 수 있는 또 다른 작업은 번역입니다. 이 작업을 위해 인코더-디코더 모델을 사용할 수 있지만, 여기서는 예시의 단순성을 위해 꽤 좋은 성능을 보이는 Falcon-7b-instruct를 계속 사용하겠습니다. 다시 한 번, 모델에게 영어에서 이탈리아어로 텍스트를 번역하도록 지시하는 기본적인 프롬프트를 작성하는 방법은 다음과 같습니다: ```python >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> prompt = """Translate the English text to Italian. ... Text: Sometimes, I've believed as many as six impossible things before breakfast. ... Translation: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=20, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") A volte, ho creduto a sei impossibili cose prima di colazione. ``` 여기서는 모델이 출력을 생성할 때 조금 더 유연해질 수 있도록 `do_sample=True`와 `top_k=10`을 추가했습니다. #### 텍스트 요약 [[text-summarization]] 번역과 마찬가지로, 텍스트 요약은 출력이 입력에 크게 의존하는 또 다른 생성 작업이며, 인코더-디코더 기반 모델이 더 나은 선택일 수 있습니다. 그러나 디코더 기반의 모델도 이 작업에 사용될 수 있습니다. 이전에는 프롬프트의 맨 처음에 지시사항을 배치했습니다. 하지만 프롬프트의 맨 끝도 지시사항을 넣을 적절한 위치가 될 수 있습니다. 일반적으로 지시사항을 양 극단 중 하나에 배치하는 것이 더 좋습니다. ```python >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT >>> prompt = """Permaculture is a design process mimicking the diversity, functionality and resilience of natural ecosystems. The principles and practices are drawn from traditional ecological knowledge of indigenous cultures combined with modern scientific understanding and technological innovations. Permaculture design provides a framework helping individuals and communities develop innovative, creative and effective strategies for meeting basic needs while preparing for and mitigating the projected impacts of climate change. ... Write a summary of the above text. ... Summary: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding. ``` #### 질의 응답 [[question-answering]] 질의 응답 작업을 위해 프롬프트를 다음과 같은 논리적 구성요소로 구조화할 수 있습니다. 지시사항, 맥락, 질문, 그리고 모델이 답변 생성을 시작하도록 유도하는 선도 단어나 구문(`"Answer:"`) 을 사용할 수 있습니다: ```python >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> prompt = """Answer the question using the context below. ... Context: Gazpacho is a cold soup and drink made of raw, blended vegetables. Most gazpacho includes stale bread, tomato, cucumbers, onion, bell peppers, garlic, olive oil, wine vinegar, water, and salt. Northern recipes often include cumin and/or pimentón (smoked sweet paprika). Traditionally, gazpacho was made by pounding the vegetables in a mortar with a pestle; this more laborious method is still sometimes used as it helps keep the gazpacho cool and avoids the foam and silky consistency of smoothie versions made in blenders or food processors. ... Question: What modern tool is used to make gazpacho? ... Answer: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... 
print(f"Result: {seq['generated_text']}") Result: Modern tools often used to make gazpacho include ``` #### 추론 [[reasoning]] 추론은 대규모 언어 모델(LLM)에게 가장 어려운 작업 중 하나이며, 좋은 결과를 얻기 위해서는 종종 [생각의 사슬(Chain-of-thought, CoT)](#chain-of-thought)과 같은 고급 프롬프팅 기법을 적용해야 합니다. 간단한 산술 작업에 대해 기본적인 프롬프트로 모델이 추론할 수 있는지 시도해 보겠습니다: ```python >>> torch.manual_seed(5) # doctest: +IGNORE_RESULT >>> prompt = """There are 5 groups of students in the class. Each group has 4 students. How many students are there in the class?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: There are a total of 5 groups, so there are 5 x 4=20 students in the class. ``` 정확한 답변이 생성되었습니다! 복잡성을 조금 높여보고 기본적인 프롬프트로도 여전히 해결할 수 있는지 확인해 보겠습니다: ```python >>> torch.manual_seed(6) >>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. My partner then bought 6 more muffins and ate 2. How many muffins do we now have?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: The total number of muffins now is 21 ``` 정답은 12여야 하는데 21이라는 잘못된 답변이 나왔습니다. 이 경우, 프롬프트가 너무 기본적이거나 모델의 크기가 작아서 생긴 문제일 수 있습니다. 우리는 Falcon의 가장 작은 버전을 선택했습니다. 추론은 큰 모델에게도 어려운 작업이지만, 더 큰 모델들이 더 나은 성능을 보일 가능성이 높습니다. ## 대규모 언어 모델 프롬프트 작성의 모범 사례 [[best-practices-of-llm-prompting]] 이 섹션에서는 프롬프트 결과를 향상시킬 수 있는 모범 사례 목록을 작성했습니다: * 작업할 모델을 선택할 때 최신 및 가장 강력한 모델이 더 나은 성능을 발휘할 가능성이 높습니다. * 간단하고 짧은 프롬프트로 시작하여 점진적으로 개선해 나가세요. * 프롬프트의 시작 부분이나 맨 끝에 지시사항을 배치하세요. 대규모 컨텍스트를 다룰 때, 모델들은 어텐션 복잡도가 2차적으로 증가하는 것을 방지하기 위해 다양한 최적화를 적용합니다. 이렇게 함으로써 모델이 프롬프트의 중간보다 시작이나 끝 부분에 더 주의를 기울일 수 있습니다. * 지시사항을 적용할 텍스트와 명확하게 분리해보세요. (이에 대해서는 다음 섹션에서 더 자세히 다룹니다.) * 작업과 원하는 결과에 대해 구체적이고 풍부한 설명을 제공하세요. 형식, 길이, 스타일, 언어 등을 명확하게 작성해야 합니다. * 모호한 설명과 지시사항을 피하세요. * "하지 말라"는 지시보다는 "무엇을 해야 하는지"를 말하는 지시를 사용하는 것이 좋습니다. * 첫 번째 단어를 쓰거나 첫 번째 문장을 시작하여 출력을 올바른 방향으로 "유도"하세요. * [퓨샷(Few-shot) 프롬프팅](#few-shot-prompting) 및 [생각의 사슬(Chain-of-thought, CoT)](#chain-of-thought) 같은 고급 기술을 사용해보세요. * 프롬프트의 견고성을 평가하기 위해 다른 모델로도 테스트하세요. * 프롬프트의 버전을 관리하고 성능을 추적하세요. ## 고급 프롬프트 기법 [[advanced-prompting-techniques]] ### 퓨샷(Few-shot) 프롬프팅 [[few-shot-prompting]] 위 섹션의 기본 프롬프트들은 "제로샷(Zero-shot)" 프롬프트의 예시입니다. 이는 모델에 지시사항과 맥락은 주어졌지만, 해결책이 포함된 예시는 제공되지 않았다는 의미입니다. 지시 데이터셋으로 미세 조정된 대규모 언어 모델은 일반적으로 이러한 "제로샷" 작업에서 좋은 성능을 보입니다. 하지만 여러분의 작업이 더 복잡하거나 미묘한 차이가 있을 수 있고, 아마도 지시사항만으로는 모델이 포착하지 못하는 출력에 대한 요구사항이 있을 수 있습니다. 이런 경우에는 퓨샷(Few-shot) 프롬프팅이라는 기법을 시도해 볼 수 있습니다. 퓨샷 프롬프팅에서는 프롬프트에 예시를 제공하여 모델에 더 많은 맥락을 주고 성능을 향상시킵니다. 이 예시들은 모델이 예시의 패턴을 따라 출력을 생성하도록 조건화합니다. 다음은 예시입니다: ```python >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961. ... Date: 04/12/1961 ... Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. ... Date:""" >>> sequences = pipe( ... prompt, ... max_new_tokens=8, ... do_sample=True, ... top_k=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Text: The first human went into space and orbited the Earth on April 12, 1961. Date: 04/12/1961 Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. 
Kennedy and Richard Nixon. Date: 09/28/1960 ``` 위의 코드 스니펫에서는 모델에 원하는 출력을 보여주기 위해 단일 예시를 사용했으므로, 이를 "원샷(One-shot)" 프롬프팅이라고 부를 수 있습니다. 그러나 작업의 복잡성에 따라 하나 이상의 예시를 사용해야 할 수도 있습니다. 퓨샷 프롬프팅 기법의 한계: - 대규모 언어 모델이 예시의 패턴을 파악할 수 있지만, 이 기법은 복잡한 추론 작업에는 잘 작동하지 않습니다. - 퓨샷 프롬프팅을 적용하면 프롬프트의 길이가 길어집니다. 토큰 수가 많은 프롬프트는 계산량과 지연 시간을 증가시킬 수 있으며 프롬프트 길이에도 제한이 있습니다. - 때로는 여러 예시가 주어질 때, 모델은 의도하지 않은 패턴을 학습할 수 있습니다. 예를 들어, 세 번째 영화 리뷰가 항상 부정적이라고 학습할 수 있습니다. ### 생각의 사슬(Chain-of-thought, CoT) [[chain-of-thought]] 생각의 사슬(Chain-of-thought, CoT) 프롬프팅은 모델이 중간 추론 단계를 생성하도록 유도하는 기법으로, 복잡한 추론 작업의 결과를 개선합니다. 모델이 추론 단계를 생성하도록 유도하는 두 가지 방법이 있습니다: - 질문에 대한 상세한 답변을 예시로 제시하는 퓨샷 프롬프팅을 통해 모델에게 문제를 어떻게 해결해 나가는지 보여줍니다. - "단계별로 생각해 봅시다" 또는 "깊게 숨을 쉬고 문제를 단계별로 해결해 봅시다"와 같은 문구를 추가하여 모델에게 추론하도록 지시합니다. [reasoning section](#reasoning)의 머핀 예시에 생각의 사슬(Chain-of-thought, CoT) 기법을 적용하고 [HuggingChat](https://huggingface.co/chat/)에서 사용할 수 있는 (`tiiuae/falcon-180B-chat`)과 같은 더 큰 모델을 사용하면, 추론 결과가 크게 개선됩니다: ```text 단계별로 살펴봅시다: 1. 처음에 15개의 머핀이 있습니다. 2. 2개의 머핀을 먹으면 13개의 머핀이 남습니다. 3. 이웃에게 5개의 머핀을 주면 8개의 머핀이 남습니다. 4. 파트너가 6개의 머핀을 더 사오면 총 머핀 수는 14개가 됩니다. 5. 파트너가 2개의 머핀을 먹으면 12개의 머핀이 남습니다. 따라서, 현재 12개의 머핀이 있습니다. ``` ## 프롬프팅 vs 미세 조정 [[prompting-vs-fine-tuning]] 프롬프트를 최적화하여 훌륭한 결과를 얻을 수 있지만, 여전히 모델을 미세 조정하는 것이 더 좋을지 고민할 수 있습니다. 다음은 더 작은 모델을 미세 조정하는 것이 선호되는 시나리오입니다: - 도메인이 대규모 언어 모델이 사전 훈련된 것과 크게 다르고 광범위한 프롬프트 최적화로도 충분한 결과를 얻지 못한 경우. - 저자원 언어에서 모델이 잘 작동해야 하는 경우. - 엄격한 규제 하에 있는 민감한 데이터로 모델을 훈련해야 하는 경우. - 비용, 개인정보 보호, 인프라 또는 기타 제한으로 인해 작은 모델을 사용해야 하는 경우. 위의 모든 예시에서, 모델을 미세 조정하기 위해 충분히 큰 도메인별 데이터셋을 이미 가지고 있거나 합리적인 비용으로 쉽게 얻을 수 있는지 확인해야 합니다. 또한 모델을 미세 조정할 충분한 시간과 자원이 필요합니다. 만약 위의 예시들이 여러분의 경우에 해당하지 않는다면, 프롬프트를 최적화하는 것이 더 유익할 수 있습니다.
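참고로, 위의 [생각의 사슬(Chain-of-thought, CoT)](#chain-of-thought) 섹션에서 설명한 "단계별로 생각해 봅시다"와 같은 유도 문구는 이 가이드에서 만든 `pipe`로도 바로 실험해 볼 수 있습니다. 아래는 그 방법을 보여주는 간단한 예시이며, Falcon-7b-instruct처럼 비교적 작은 모델에서는 결과가 일정하지 않을 수 있다는 점을 감안하세요:

```python
>>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor.
... My partner then bought 6 more muffins and ate 2. How many muffins do we now have?
... Let's think step by step:
... """

>>> sequences = pipe(
...     prompt,
...     max_new_tokens=80,
...     do_sample=True,
...     top_k=10,
...     return_full_text=False,
... )

>>> for seq in sequences:
...     print(f"Result: {seq['generated_text']}")
```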
transformers/docs/source/ko/tasks/prompting.md/0
{ "file_path": "transformers/docs/source/ko/tasks/prompting.md", "repo_id": "transformers", "token_count": 15928 }
381
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainer [[trainer]] [`Trainer`]는 Transformers 라이브러리에 구현된 PyTorch 모델을 반복하여 훈련 및 평가 과정입니다. 훈련에 필요한 요소(모델, 토크나이저, 데이터셋, 평가 함수, 훈련 하이퍼파라미터 등)만 제공하면 [`Trainer`]가 필요한 나머지 작업을 처리합니다. 이를 통해 직접 훈련 루프를 작성하지 않고도 빠르게 훈련을 시작할 수 있습니다. 또한 [`Trainer`]는 강력한 맞춤 설정과 다양한 훈련 옵션을 제공하여 사용자 맞춤 훈련이 가능합니다. <Tip> Transformers는 [`Trainer`] 클래스 외에도 번역이나 요약과 같은 시퀀스-투-시퀀스 작업을 위한 [`Seq2SeqTrainer`] 클래스도 제공합니다. 또한 [TRL](https://hf.co/docs/trl) 라이브러리에는 [`Trainer`] 클래스를 감싸고 Llama-2 및 Mistral과 같은 언어 모델을 자동 회귀 기법으로 훈련하는 데 최적화된 [`~trl.SFTTrainer`] 클래스 입니다. [`~trl.SFTTrainer`]는 시퀀스 패킹, LoRA, 양자화 및 DeepSpeed와 같은 기능을 지원하여 크기 상관없이 모델 효율적으로 확장할 수 있습니다. <br> 이들 다른 [`Trainer`] 유형 클래스에 대해 더 알고 싶다면 [API 참조](./main_classes/trainer)를 확인하여 언제 어떤 클래스가 적합할지 얼마든지 확인하세요. 일반적으로 [`Trainer`]는 가장 다재다능한 옵션으로, 다양한 작업에 적합합니다. [`Seq2SeqTrainer`]는 시퀀스-투-시퀀스 작업을 위해 설계되었고, [`~trl.SFTTrainer`]는 언어 모델 훈련을 위해 설계되었습니다. </Tip> 시작하기 전에, 분산 환경에서 PyTorch 훈련과 실행을 할 수 있게 [Accelerate](https://hf.co/docs/accelerate) 라이브러리가 설치되었는지 확인하세요. ```bash pip install accelerate # 업그레이드 pip install accelerate --upgrade ``` 이 가이드는 [`Trainer`] 클래스에 대한 개요를 제공합니다. ## 기본 사용법 [[basic-usage]] [`Trainer`]는 기본적인 훈련 루프에 필요한 모든 코드를 포함하고 있습니다. 1. 손실을 계산하는 훈련 단계를 수행합니다. 2. [`~accelerate.Accelerator.backward`] 메소드로 그레이디언트를 계산합니다. 3. 그레이디언트를 기반으로 가중치를 업데이트합니다. 4. 정해진 에폭 수에 도달할 때까지 이 과정을 반복합니다. [`Trainer`] 클래스는 PyTorch와 훈련 과정에 익숙하지 않거나 막 시작한 경우에도 훈련이 가능하도록 필요한 모든 코드를 추상화하였습니다. 또한 매번 훈련 루프를 손수 작성하지 않아도 되며, 훈련에 필요한 모델과 데이터셋 같은 필수 구성 요소만 제공하면, [Trainer] 클래스가 나머지를 처리합니다. 훈련 옵션이나 하이퍼파라미터를 지정하려면, [`TrainingArguments`] 클래스에서 확인 할 수 있습니다. 예를 들어, 모델을 저장할 디렉토리를 `output_dir`에 정의하고, 훈련 후에 Hub로 모델을 푸시하려면 `push_to_hub=True`로 설정합니다. ```py from transformers import TrainingArguments training_args = TrainingArguments( output_dir="your-model", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=2, weight_decay=0.01, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, push_to_hub=True, ) ``` `training_args`를 [`Trainer`]에 모델, 데이터셋, 데이터셋 전처리 도구(데이터 유형에 따라 토크나이저, 특징 추출기 또는 이미지 프로세서일 수 있음), 데이터 수집기 및 훈련 중 확인할 지표를 계산할 함수를 함께 전달하세요. 마지막으로, [`~Trainer.train`]를 호출하여 훈련을 시작하세요! ```py from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], eval_dataset=dataset["test"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) trainer.train() ``` ### 체크포인트 [[checkpoints]] [`Trainer`] 클래스는 [`TrainingArguments`]의 `output_dir` 매개변수에 지정된 디렉토리에 모델 체크포인트를 저장합니다. 체크포인트는 `checkpoint-000` 하위 폴더에 저장되며, 여기서 끝의 숫자는 훈련 단계에 해당합니다. 체크포인트를 저장하면 나중에 훈련을 재개할 때 유용합니다. 
```py # 최신 체크포인트에서 재개 trainer.train(resume_from_checkpoint=True) # 출력 디렉토리에 저장된 특정 체크포인트에서 재개 trainer.train(resume_from_checkpoint="your-model/checkpoint-1000") ``` 체크포인트를 Hub에 푸시하려면 [`TrainingArguments`]에서 `push_to_hub=True`로 설정하여 커밋하고 푸시할 수 있습니다. 체크포인트 저장 방법을 결정하는 다른 옵션은 [`hub_strategy`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.hub_strategy) 매개변수에서 설정합니다: * `hub_strategy="checkpoint"`는 최신 체크포인트를 "last-checkpoint"라는 하위 폴더에 푸시하여 훈련을 재개할 수 있습니다. * `hub_strategy="all_checkpoints"`는 모든 체크포인트를 `output_dir`에 정의된 디렉토리에 푸시합니다(모델 리포지토리에서 폴더당 하나의 체크포인트를 볼 수 있습니다). 체크포인트에서 훈련을 재개할 때, [`Trainer`]는 체크포인트가 저장될 때와 동일한 Python, NumPy 및 PyTorch RNG 상태를 유지하려고 합니다. 하지만 PyTorch는 기본 설정으로 '일관된 결과를 보장하지 않음'으로 많이 되어있기 때문에, RNG 상태가 동일할 것이라고 보장할 수 없습니다. 따라서, 일관된 결과가 보장되도록 활성화 하려면, [랜덤성 제어](https://pytorch.org/docs/stable/notes/randomness#controlling-sources-of-randomness) 가이드를 참고하여 훈련을 완전히 일관된 결과를 보장 받도록 만들기 위해 활성화할 수 있는 항목을 확인하세요. 다만, 특정 설정을 결정적으로 만들면 훈련이 느려질 수 있습니다. ## Trainer 맞춤 설정 [[customize-the-trainer]] [`Trainer`] 클래스는 접근성과 용이성을 염두에 두고 설계되었지만, 더 다양한 기능을 원하는 사용자들을 위해 다양한 맞춤 설정 옵션을 제공합니다. [`Trainer`]의 많은 메소드는 서브클래스화 및 오버라이드하여 원하는 기능을 제공할 수 있으며, 이를 통해 전체 훈련 루프를 다시 작성할 필요 없이 원하는 기능을 추가할 수 있습니다. 이러한 메소드에는 다음이 포함됩니다: * [`~Trainer.get_train_dataloader`]는 훈련 데이터로더를 생성합니다. * [`~Trainer.get_eval_dataloader`]는 평가 데이터로더를 생성합니다. * [`~Trainer.get_test_dataloader`]는 테스트 데이터로더를 생성합니다. * [`~Trainer.log`]는 훈련을 모니터링하는 다양한 객체에 대한 정보를 로그로 남깁니다. * [`~Trainer.create_optimizer_and_scheduler`]는 `__init__`에서 전달되지 않은 경우 옵티마이저와 학습률 스케줄러를 생성합니다. 이들은 각각 [`~Trainer.create_optimizer`] 및 [`~Trainer.create_scheduler`]로 별도로 맞춤 설정 할 수 있습니다. * [`~Trainer.compute_loss`]는 훈련 입력 배치에 대한 손실을 계산합니다. * [`~Trainer.training_step`]는 훈련 단계를 수행합니다. * [`~Trainer.prediction_step`]는 예측 및 테스트 단계를 수행합니다. * [`~Trainer.evaluate`]는 모델을 평가하고 평가 지표을 반환합니다. * [`~Trainer.predict`]는 테스트 세트에 대한 예측(레이블이 있는 경우 지표 포함)을 수행합니다. 예를 들어, [`~Trainer.compute_loss`] 메소드를 맞춤 설정하여 가중 손실을 사용하려는 경우: ```py from torch import nn from transformers import Trainer class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): labels = inputs.pop("labels") # 순방향 전파 outputs = model(**inputs) logits = outputs.get("logits") # 서로 다른 가중치로 3개의 레이블에 대한 사용자 정의 손실을 계산 loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0], device=model.device)) loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) return (loss, outputs) if return_outputs else loss ``` ### 콜백 [[callbacks]] [`Trainer`]를 맞춤 설정하는 또 다른 방법은 [콜백](callbacks)을 사용하는 것입니다. 콜백은 훈련 루프에서 *변화를 주지 않습니다*. 훈련 루프의 상태를 검사한 후 상태에 따라 일부 작업(조기 종료, 결과 로그 등)을 실행합니다. 즉, 콜백은 사용자 정의 손실 함수와 같은 것을 구현하는 데 사용할 수 없으며, 이를 위해서는 [`~Trainer.compute_loss`] 메소드를 서브클래스화하고 오버라이드해야 합니다. 예를 들어, 훈련 루프에 10단계 후 조기 종료 콜백을 추가하려면 다음과 같이 합니다. ```py from transformers import TrainerCallback class EarlyStoppingCallback(TrainerCallback): def __init__(self, num_steps=10): self.num_steps = num_steps def on_step_end(self, args, state, control, **kwargs): if state.global_step >= self.num_steps: return {"should_training_stop": True} else: return {} ``` 그런 다음, 이를 [`Trainer`]의 `callback` 매개변수에 전달합니다. ```py from transformers import Trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], eval_dataset=dataset["test"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, callbacks=[EarlyStoppingCallback()], ) ``` ## 로깅 [[logging]] <Tip> 로깅 API에 대한 자세한 내용은 [로깅](./main_classes/logging) API 레퍼런스를 확인하세요. 
</Tip> [`Trainer`]는 기본적으로 `logging.INFO`로 설정되어 있어 오류, 경고 및 기타 기본 정보를 보고합니다. 분산 환경에서는 [`Trainer`] 복제본이 `logging.WARNING`으로 설정되어 오류와 경고만 보고합니다. [`TrainingArguments`]의 [`log_level`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.log_level) 및 [`log_level_replica`](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.log_level_replica) 매개변수로 로그 레벨을 변경할 수 있습니다. 각 노드의 로그 레벨 설정을 구성하려면 [`log_on_each_node`](https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.TrainingArguments.log_on_each_node) 매개변수를 사용하여 각 노드에서 로그 레벨을 사용할지 아니면 주 노드에서만 사용할지 결정하세요. <Tip> [`Trainer`]는 [`Trainer.__init__`] 메소드에서 각 노드에 대해 로그 레벨을 별도로 설정하므로, 다른 Transformers 기능을 사용할 경우 [`Trainer`] 객체를 생성하기 전에 이를 미리 설정하는 것이 좋습니다. </Tip> 예를 들어, 메인 코드와 모듈을 각 노드에 따라 동일한 로그 레벨을 사용하도록 설정하려면 다음과 같이 합니다. ```py logger = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...) ``` 각 노드에서 기록될 내용을 구성하기 위해 `log_level`과 `log_level_replica`를 다양한 조합으로 사용해보세요. <hfoptions id="logging"> <hfoption id="single node"> ```bash my_app.py ... --log_level warning --log_level_replica error ``` </hfoption> <hfoption id="multi-node"> 멀티 노드 환경에서는 `log_on_each_node 0` 매개변수를 추가합니다. ```bash my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0 # 오류만 보고하도록 설정 my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0 ``` </hfoption> </hfoptions> ## NEFTune [[neftune]] [NEFTune](https://hf.co/papers/2310.05914)은 훈련 중 임베딩 벡터에 노이즈를 추가하여 성능을 향상시킬 수 있는 기술입니다. [`Trainer`]에서 이를 활성화하려면 [`TrainingArguments`]의 `neftune_noise_alpha` 매개변수를 설정하여 노이즈의 양을 조절합니다. ```py from transformers import TrainingArguments, Trainer training_args = TrainingArguments(..., neftune_noise_alpha=0.1) trainer = Trainer(..., args=training_args) ``` NEFTune은 예상치 못한 동작을 피할 목적으로 처음 임베딩 레이어로 복원하기 위해 훈련 후 비활성화 됩니다. ## GaLore [[galore]] Gradient Low-Rank Projection (GaLore)은 전체 매개변수를 학습하면서도 LoRA와 같은 일반적인 저계수 적응 방법보다 더 메모리 효율적인 저계수 학습 전략입니다. 먼저 GaLore 공식 리포지토리를 설치합니다: ```bash pip install galore-torch ``` 그런 다음 `optim`에 `["galore_adamw", "galore_adafactor", "galore_adamw_8bit"]` 중 하나와 함께 `optim_target_modules`를 추가합니다. 이는 적용하려는 대상 모듈 이름에 해당하는 문자열, 정규 표현식 또는 전체 경로의 목록일 수 있습니다. 아래는 end-to-end 예제 스크립트입니다(필요한 경우 `pip install trl datasets`를 실행): ```python import datasets from trl import SFTConfig, SFTTrainer train_dataset = datasets.load_dataset('imdb', split='train') args = SFTConfig( output_dir="./test-galore", max_steps=100, optim="galore_adamw", optim_target_modules=[r".*.attn.*", r".*.mlp.*"], gradient_checkpointing=True, ) trainer = SFTTrainer( model="google/gemma-2b", args=args, train_dataset=train_dataset, ) trainer.train() ``` GaLore가 지원하는 추가 매개변수를 전달하려면 `optim_args`를 설정합니다. 
예를 들어: ```python import datasets from trl import SFTConfig, SFTTrainer train_dataset = datasets.load_dataset('imdb', split='train') args = SFTConfig( output_dir="./test-galore", max_steps=100, optim="galore_adamw", optim_target_modules=[r".*.attn.*", r".*.mlp.*"], optim_args="rank=64, update_proj_gap=100, scale=0.10", gradient_checkpointing=True, ) trainer = SFTTrainer( model="google/gemma-2b", args=args, train_dataset=train_dataset, ) trainer.train() ``` 해당 방법에 대한 자세한 내용은 [원본 리포지토리](https://github.com/jiaweizzhao/GaLore) 또는 [논문](https://huggingface.co/papers/2403.03507)을 참고하세요. 현재 GaLore 레이어로 간주되는 Linear 레이어만 훈련 할수 있으며, 저계수 분해를 사용하여 훈련되고 나머지 레이어는 기존 방식으로 최적화됩니다. 훈련 시작 전에 시간이 약간 걸릴 수 있습니다(NVIDIA A100에서 2B 모델의 경우 약 3분), 하지만 이후 훈련은 원활하게 진행됩니다. 다음과 같이 옵티마이저 이름에 `layerwise`를 추가하여 레이어별 최적화를 수행할 수도 있습니다: ```python import datasets from trl import SFTConfig, SFTTrainer train_dataset = datasets.load_dataset('imdb', split='train') args = SFTConfig( output_dir="./test-galore", max_steps=100, optim="galore_adamw_layerwise", optim_target_modules=[r".*.attn.*", r".*.mlp.*"], gradient_checkpointing=True, ) trainer = SFTTrainer( model="google/gemma-2b", args=args, train_dataset=train_dataset, ) trainer.train() ``` 레이어별 최적화는 다소 실험적이며 DDP(분산 데이터 병렬)를 지원하지 않으므로, 단일 GPU에서만 훈련 스크립트를 실행할 수 있습니다. 자세한 내용은 [이 문서를](https://github.com/jiaweizzhao/GaLore?tab=readme-ov-file#train-7b-model-with-a-single-gpu-with-24gb-memory)을 참조하세요. gradient clipping, DeepSpeed 등 다른 기능은 기본적으로 지원되지 않을 수 있습니다. 이러한 문제가 발생하면 [GitHub에 이슈를 올려주세요](https://github.com/huggingface/transformers/issues). ## LOMO 옵티마이저 [[lomo-optimizer]] LOMO 옵티마이저는 [제한된 자원으로 대형 언어 모델의 전체 매개변수 미세 조정](https://hf.co/papers/2306.09782)과 [적응형 학습률을 통한 저메모리 최적화(AdaLomo)](https://hf.co/papers/2310.10195)에서 도입되었습니다. 이들은 모두 효율적인 전체 매개변수 미세 조정 방법으로 구성되어 있습니다. 이러한 옵티마이저들은 메모리 사용량을 줄이기 위해 그레이디언트 계산과 매개변수 업데이트를 하나의 단계로 융합합니다. LOMO에서 지원되는 옵티마이저는 `"lomo"`와 `"adalomo"`입니다. 먼저 pypi에서 `pip install lomo-optim`를 통해 `lomo`를 설치하거나, GitHub 소스에서 `pip install git+https://github.com/OpenLMLab/LOMO.git`로 설치하세요. <Tip> 저자에 따르면, `grad_norm` 없이 `AdaLomo`를 사용하는 것이 더 나은 성능과 높은 처리량을 제공한다고 합니다. </Tip> 다음은 IMDB 데이터셋에서 [google/gemma-2b](https://huggingface.co/google/gemma-2b)를 최대 정밀도로 미세 조정하는 간단한 스크립트입니다: ```python import datasets from trl import SFTConfig, SFTTrainer train_dataset = datasets.load_dataset('imdb', split='train') args = SFTConfig( output_dir="./test-lomo", max_steps=100, optim="adalomo", gradient_checkpointing=True, ) trainer = SFTTrainer( model="google/gemma-2b", args=args, train_dataset=train_dataset, ) trainer.train() ``` ## Accelerate와 Trainer [[accelerate-and-trainer]] [`Trainer`] 클래스는 [Accelerate](https://hf.co/docs/accelerate)로 구동되며, 이는 [FullyShardedDataParallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) 및 [DeepSpeed](https://www.deepspeed.ai/)와 같은 통합을 지원하는 분산 환경에서 PyTorch 모델을 쉽게 훈련할 수 있는 라이브러리입니다. <Tip> FSDP 샤딩 전략, CPU 오프로드 및 [`Trainer`]와 함께 사용할 수 있는 더 많은 기능을 알아보려면 [Fully Sharded Data Parallel](fsdp) 가이드를 확인하세요. </Tip> [`Trainer`]와 Accelerate를 사용하려면 [`accelerate.config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) 명령을 실행하여 훈련 환경을 설정하세요. 이 명령은 훈련 스크립트를 실행할 때 사용할 `config_file.yaml`을 생성합니다. 예를 들어, 다음 예시는 설정할 수 있는 일부 구성 예입니다. 
<hfoptions id="config"> <hfoption id="DistributedDataParallel"> ```yml compute_environment: LOCAL_MACHINE distributed_type: MULTI_GPU downcast_bf16: 'no' gpu_ids: all machine_rank: 0 # 노드에 따라 순위를 변경하세요 main_process_ip: 192.168.20.1 main_process_port: 9898 main_training_function: main mixed_precision: fp16 num_machines: 2 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` </hfoption> <hfoption id="FSDP"> ```yml compute_environment: LOCAL_MACHINE distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: true fsdp_offload_params: false fsdp_sharding_strategy: 1 fsdp_state_dict_type: FULL_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` </hfoption> <hfoption id="DeepSpeed"> ```yml compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/user/configs/ds_zero3_config.json zero3_init_flag: true distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` </hfoption> <hfoption id="DeepSpeed with Accelerate plugin"> ```yml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 0.7 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero_stage: 2 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` </hfoption> </hfoptions> [`accelerate_launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) 명령은 Accelerate와 [`Trainer`]를 사용하여 분산 시스템에서 훈련 스크립트를 실행하는 권장 방법이며, `config_file.yaml`에 지정된 매개변수를 사용합니다. 이 파일은 Accelerate 캐시 폴더에 저장되며 `accelerate_launch`를 실행할 때 자동으로 로드됩니다. 
예를 들어, FSDP 구성을 사용하여 [run_glue.py](https://github.com/huggingface/transformers/blob/f4db565b695582891e43a5e042e5d318e28f20b8/examples/pytorch/text-classification/run_glue.py#L4) 훈련 스크립트를 실행하려면 다음과 같이 합니다: ```bash accelerate launch \ ./examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` `config_file.yaml` 파일의 매개변수를 직접 지정할 수도 있습니다: ```bash accelerate launch --num_processes=2 \ --use_fsdp \ --mixed_precision=bf16 \ --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \ --fsdp_transformer_layer_cls_to_wrap="BertLayer" \ --fsdp_sharding_strategy=1 \ --fsdp_state_dict_type=FULL_STATE_DICT \ ./examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` `accelerate_launch`와 사용자 정의 구성에 대해 더 알아보려면 [Accelerate 스크립트 실행](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 튜토리얼을 확인하세요.
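참고로, 위의 [기본 사용법](#basic-usage) 예시에서 [`Trainer`]에 전달한 `compute_metrics` 함수는 이 문서에서 따로 정의하지 않았습니다. 예를 들어 🤗 Evaluate 라이브러리의 정확도(accuracy) 지표를 사용하는 분류 작업을 가정하면, 다음과 같은 형태로 작성할 수 있습니다:

```py
import numpy as np
import evaluate

# 분류 작업을 가정한 예시입니다
metric = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
```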
transformers/docs/source/ko/trainer.md/0
{ "file_path": "transformers/docs/source/ko/trainer.md", "repo_id": "transformers", "token_count": 15482 }
382
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tour rápido [[open-in-colab]] Comece a trabalhar com 🤗 Transformers! Comece usando [`pipeline`] para rápida inferência e facilmente carregue um modelo pré-treinado e um tokenizer com [AutoClass](./model_doc/auto) para resolver tarefas de texto, visão ou áudio. <Tip> Todos os exemplos de código apresentados na documentação têm um botão no canto superior direito para escolher se você deseja ocultar ou mostrar o código no Pytorch ou no TensorFlow. Caso contrário, é esperado que funcione para ambos back-ends sem nenhuma alteração. </Tip> ## Pipeline [`pipeline`] é a maneira mais fácil de usar um modelo pré-treinado para uma dada tarefa. <Youtube id="tiZFewofSLM"/> A [`pipeline`] apoia diversas tarefas fora da caixa: **Texto**: * Análise sentimental: classifica a polaridade de um texto. * Geração de texto (em Inglês): gera texto a partir de uma entrada. * Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc...) * Respostas: extrai uma resposta dado algum contexto e uma questão * Máscara de preenchimento: preenche o espaço, dado um texto com máscaras de palavras. * Sumarização: gera o resumo de um texto longo ou documento. * Tradução: traduz texto para outra língua. * Extração de características: cria um tensor que representa o texto. **Imagem**: * Classificação de imagens: classifica uma imagem. * Segmentação de imagem: classifica cada pixel da imagem. * Detecção de objetos: detecta objetos em uma imagem. **Audio**: * Classficação de áudio: legenda um trecho de áudio fornecido. * Reconhecimento de fala automático: transcreve audio em texto. <Tip> Para mais detalhes sobre a [`pipeline`] e tarefas associadas, siga a documentação [aqui](./main_classes/pipelines). </Tip> ### Uso da pipeline No exemplo a seguir, você usará [`pipeline`] para análise sentimental. Instale as seguintes dependências se você ainda não o fez: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importe [`pipeline`] e especifique a tarefa que deseja completar: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` A pipeline baixa and armazena um [modelo pré-treinado](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) padrão e tokenizer para análise sentimental. 
Agora você pode usar `classifier` no texto alvo: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` Para mais de uma sentença, passe uma lista para a [`pipeline`], a qual retornará uma lista de dicionários: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` A [`pipeline`] também pode iterar sobre um Dataset inteiro. Comece instalando a biblioteca de [🤗 Datasets](https://huggingface.co/docs/datasets/): ```bash pip install datasets ``` Crie uma [`pipeline`] com a tarefa que deseja resolver e o modelo que deseja usar. ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` A seguir, carregue uma base de dados (confira a 🤗 [Iniciação em Datasets](https://huggingface.co/docs/datasets/quickstart) para mais detalhes) que você gostaria de iterar sobre. Por exemplo, vamos carregar o dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda à taxa de amostragem em que o facebook/wav2vec2-base-960h foi treinado. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Os arquivos de áudio são carregados e re-amostrados automaticamente ao chamar a coluna `"audio"`. Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e passá-las como uma lista para o pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I TURN A JOIN A COUNT'] ``` Para um conjunto de dados maior onde as entradas são maiores (como em fala ou visão), será necessário passar um gerador em vez de uma lista que carregue todas as entradas na memória. Consulte a [documentação do pipeline](./main_classes/pipelines) para mais informações. ### Use outro modelo e tokenizer na pipeline A [`pipeline`] pode acomodar qualquer modelo do [Model Hub](https://huggingface.co/models), facilitando sua adaptação para outros casos de uso. Por exemplo, se você quiser um modelo capaz de lidar com texto em francês, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) bilíngue ajustado para análise de sentimentos. Ótimo, vamos usar este modelo! 
```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use o [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] para carregar o modelo pré-treinado e seu tokenizer associado (mais em `AutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use o [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] para carregar o modelo pré-treinado e o tokenizer associado (mais em `TFAutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Então você pode especificar o modelo e o tokenizador na [`pipeline`] e aplicar o `classifier` no seu texto alvo: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Se você não conseguir achar um modelo para o seu caso de uso, precisará usar fine-tune em um modelo pré-treinado nos seus dados. Veja nosso [tutorial de fine-tuning](./training) para descobrir como. Finalmente, depois que você tiver usado esse processo em seu modelo, considere compartilhá-lo conosco (veja o tutorial [aqui](./model_sharing)) na plataforma Model Hub afim de democratizar NLP! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Por baixo dos panos, as classes [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] trabalham juntas para fortificar o [`pipeline`]. Um [AutoClass](./model_doc/auto) é um atalho que automaticamente recupera a arquitetura de um modelo pré-treinado a partir de seu nome ou caminho. Basta selecionar a `AutoClass` apropriada para sua tarefa e seu tokenizer associado com [`AutoTokenizer`]. Vamos voltar ao nosso exemplo e ver como você pode usar a `AutoClass` para replicar os resultados do [`pipeline`]. ### AutoTokenizer Um tokenizer é responsável por pré-processar o texto em um formato que seja compreensível para o modelo. Primeiro, o tokenizer dividirá o texto em palavras chamadas *tokens*. Existem várias regras que regem o processo de tokenização, incluindo como dividir uma palavra e em que nível (saiba mais sobre tokenização [aqui](./tokenizer_summary)). A coisa mais importante a lembrar, porém, é que você precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que está usando as mesmas regras de tokenização com as quais um modelo foi pré-treinado. Carregue um tokenizer com [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Em seguida, o tokenizer converte os tokens em números para construir um tensor como entrada para o modelo. Isso é conhecido como o *vocabulário* do modelo. 
Passe o texto para o tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` O tokenizer retornará um dicionário contendo: * [input_ids](./glossary#input-ids): representações numéricas de seus tokens. * [attention_mask](.glossary#attention-mask): indica quais tokens devem ser atendidos. Assim como o [`pipeline`], o tokenizer aceitará uma lista de entradas. Além disso, o tokenizer também pode preencher e truncar o texto para retornar um lote com comprimento uniforme: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Leia o tutorial de [pré-processamento](./pré-processamento) para obter mais detalhes sobre tokenização. ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`AutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`AutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. </Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo. Você apenas tem que descompactar o dicionário usando `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`TFAutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`TFAutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. 
</Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo através da passagem de chaves de dicionários ao tensor. ```py >>> tf_outputs = tf_model(tf_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Todos os modelos de 🤗 Transformers (PyTorch ou TensorFlow) geram tensores *antes* da função de ativação final (como softmax) pois essa função algumas vezes é fundida com a perda. </Tip> Os modelos são um standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) ou um [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) para que você possa usá-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, 🤗 Transformers fornece uma classe [`Trainer`] para PyTorch que adiciona funcionalidade para treinamento distribuído, precisão mista e muito mais. Para o TensorFlow, você pode usar o método `fit` de [Keras](https://keras.io/). Consulte o [tutorial de treinamento](./training) para obter mais detalhes. <Tip> As saídas do modelo 🤗 Transformers são classes de dados especiais para que seus atributos sejam preenchidos automaticamente em um IDE. As saídas do modelo também se comportam como uma tupla ou um dicionário (por exemplo, você pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos `None` são ignorados. </Tip> ### Salvar um modelo <frameworkcontent> <pt> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`TFPreTrainedModel.from_pretrained`] ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Um recurso particularmente interessante dos 🤗 Transformers é a capacidade de salvar um modelo e recarregá-lo como um modelo PyTorch ou TensorFlow. Use `from_pt` ou `from_tf` para converter o modelo de um framework para outro: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent>
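Como referência, depois de salvar o modelo e o tokenizer no mesmo diretório, você também pode recarregá-los juntos em um [`pipeline`], passando o caminho do diretório como modelo. O exemplo abaixo é apenas ilustrativo e assume o diretório `./pt_save_pretrained` criado acima:

```py
>>> from transformers import pipeline

>>> classifier = pipeline("sentiment-analysis", model="./pt_save_pretrained")
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
```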
transformers/docs/source/pt/quicktour.md/0
{ "file_path": "transformers/docs/source/pt/quicktour.md", "repo_id": "transformers", "token_count": 6103 }
383
<!--版权2020年HuggingFace团队保留所有权利。 根据Apache许可证第2.0版(“许可证”)许可;除非符合许可证,否则您不得使用此文件。您可以在以下网址获取许可证的副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则按“按原样”分发的软件,无论是明示还是暗示的,都没有任何担保或条件。请参阅许可证以了解特定语言下的权限和限制。 ⚠️ 请注意,本文件虽然使用Markdown编写,但包含了特定的语法,适用于我们的doc-builder(类似于MDX),可能无法在您的Markdown查看器中正常渲染。 --> # 基于BERT进行的相关研究(BERTology) 当前,一个新兴的研究领域正致力于探索大规模 transformer 模型(如BERT)的内部工作机制,一些人称之为“BERTology”。以下是这个领域的一些典型示例: - BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: https://huggingface.co/papers/1905.05950 - Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://huggingface.co/papers/1905.10650 - What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://huggingface.co/papers/1906.04341 - CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://huggingface.co/papers/2210.04633 为了助力这一新兴领域的发展,我们在BERT/GPT/GPT-2模型中增加了一些附加功能,方便人们访问其内部表示,这些功能主要借鉴了Paul Michel的杰出工作(https://huggingface.co/papers/1905.10650): - 访问BERT/GPT/GPT-2的所有隐藏状态, - 访问BERT/GPT/GPT-2每个注意力头的所有注意力权重, - 检索注意力头的输出值和梯度,以便计算头的重要性得分并对头进行剪枝,详情可见论文:https://huggingface.co/papers/1905.10650。 为了帮助您理解和使用这些功能,我们添加了一个具体的示例脚本:[bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py),该脚本可以对一个在 GLUE 数据集上预训练的模型进行信息提取与剪枝。
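如果您只是想快速查看这些内部表示,而不运行上述完整脚本,也可以在加载模型时直接请求返回注意力权重和隐藏状态。下面是一个简化的示意性示例(这里以 `bert-base-uncased` 检查点为例,并非上述脚本的一部分):

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True, output_hidden_states=True)

inputs = tokenizer("Hello, world!", return_tensors="pt")
outputs = model(**inputs)

# hidden_states:包含嵌入层输出在内的每一层隐藏状态
print(len(outputs.hidden_states), outputs.hidden_states[-1].shape)
# attentions:每一层、每个注意力头的注意力权重
print(len(outputs.attentions), outputs.attentions[0].shape)
```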
transformers/docs/source/zh/bertology.md/0
{ "file_path": "transformers/docs/source/zh/bertology.md", "repo_id": "transformers", "token_count": 1343 }
384
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# 用于生成的工具

此页面列出了所有由 [`~generation.GenerationMixin.generate`] 使用的实用函数。

## 生成输出

[`~generation.GenerationMixin.generate`] 的输出是 [`~utils.ModelOutput`] 的一个子类的实例。这个输出是一种包含 [`~generation.GenerationMixin.generate`] 返回的所有信息的数据结构,但也可以作为元组或字典使用。

这里是一个例子:

```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
```

`generation_output` 的对象是 [`~generation.GenerateDecoderOnlyOutput`] 的一个实例,从该类的文档中我们可以看到,这意味着它具有以下属性:

- `sequences`: 生成的tokens序列
- `scores`(可选): 每个生成步骤的语言建模头的预测分数
- `hidden_states`(可选): 每个生成步骤模型的hidden states
- `attentions`(可选): 每个生成步骤模型的注意力权重

在这里,由于我们传递了 `output_scores=True`,我们具有 `scores` 属性。但我们没有 `hidden_states` 和 `attentions`,因为没有传递 `output_hidden_states=True` 或 `output_attentions=True`。

您可以像通常一样访问每个属性,如果该属性未被模型返回,则将获得 `None`。例如,在这里 `generation_output.scores` 是语言建模头的所有生成预测分数,而 `generation_output.attentions` 为 `None`。

当我们将 `generation_output` 对象用作元组时,它只保留非 `None` 值的属性。例如,在这里它有两个元素,先是 `sequences`,然后是 `scores`,所以

```python
generation_output[:2]
```

将返回元组`(generation_output.sequences, generation_output.scores)`。

当我们将`generation_output`对象用作字典时,它只保留非`None`的属性。例如,它有两个键,分别是`sequences`和`scores`。

我们在此记录所有输出类型。

### PyTorch

[[autodoc]] generation.GenerateDecoderOnlyOutput

[[autodoc]] generation.GenerateEncoderDecoderOutput

[[autodoc]] generation.GenerateBeamDecoderOnlyOutput

[[autodoc]] generation.GenerateBeamEncoderDecoderOutput

### TensorFlow

[[autodoc]] generation.TFGreedySearchEncoderDecoderOutput

[[autodoc]] generation.TFGreedySearchDecoderOnlyOutput

[[autodoc]] generation.TFSampleEncoderDecoderOutput

[[autodoc]] generation.TFSampleDecoderOnlyOutput

[[autodoc]] generation.TFBeamSearchEncoderDecoderOutput

[[autodoc]] generation.TFBeamSearchDecoderOnlyOutput

[[autodoc]] generation.TFBeamSampleEncoderDecoderOutput

[[autodoc]] generation.TFBeamSampleDecoderOnlyOutput

[[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput

[[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput

### FLAX

[[autodoc]] generation.FlaxSampleOutput

[[autodoc]] generation.FlaxGreedySearchOutput

[[autodoc]] generation.FlaxBeamSearchOutput

## LogitsProcessor

[`LogitsProcessor`] 可以用于修改语言模型头的预测分数以进行生成。

### PyTorch

[[autodoc]] AlternatingCodebooksLogitsProcessor
    - __call__

[[autodoc]] ClassifierFreeGuidanceLogitsProcessor
    - __call__

[[autodoc]] EncoderNoRepeatNGramLogitsProcessor
    - __call__

[[autodoc]] EncoderRepetitionPenaltyLogitsProcessor
    - __call__

[[autodoc]] EpsilonLogitsWarper
    - __call__

[[autodoc]] EtaLogitsWarper
    - __call__

[[autodoc]] ExponentialDecayLengthPenalty
    - __call__

[[autodoc]]
ForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] ForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] HammingDiversityLogitsProcessor - __call__ [[autodoc]] InfNanRemoveLogitsProcessor - __call__ [[autodoc]] LogitNormalization - __call__ [[autodoc]] LogitsProcessor - __call__ [[autodoc]] LogitsProcessorList - __call__ [[autodoc]] MinLengthLogitsProcessor - __call__ [[autodoc]] MinNewTokensLengthLogitsProcessor - __call__ [[autodoc]] NoBadWordsLogitsProcessor - __call__ [[autodoc]] NoRepeatNGramLogitsProcessor - __call__ [[autodoc]] PrefixConstrainedLogitsProcessor - __call__ [[autodoc]] RepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] SequenceBiasLogitsProcessor - __call__ [[autodoc]] SuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] SuppressTokensLogitsProcessor - __call__ [[autodoc]] TemperatureLogitsWarper - __call__ [[autodoc]] TopKLogitsWarper - __call__ [[autodoc]] TopPLogitsWarper - __call__ [[autodoc]] TypicalLogitsWarper - __call__ [[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] WhisperTimeStampLogitsProcessor - __call__ ### TensorFlow [[autodoc]] TFForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] TFForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] TFForceTokensLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessorList - __call__ [[autodoc]] TFLogitsWarper - __call__ [[autodoc]] TFMinLengthLogitsProcessor - __call__ [[autodoc]] TFNoBadWordsLogitsProcessor - __call__ [[autodoc]] TFNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] TFRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensLogitsProcessor - __call__ [[autodoc]] TFTemperatureLogitsWarper - __call__ [[autodoc]] TFTopKLogitsWarper - __call__ [[autodoc]] TFTopPLogitsWarper - __call__ ### FLAX [[autodoc]] FlaxForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForceTokensLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessorList - __call__ [[autodoc]] FlaxLogitsWarper - __call__ [[autodoc]] FlaxMinLengthLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensLogitsProcessor - __call__ [[autodoc]] FlaxTemperatureLogitsWarper - __call__ [[autodoc]] FlaxTopKLogitsWarper - __call__ [[autodoc]] FlaxTopPLogitsWarper - __call__ [[autodoc]] FlaxWhisperTimeStampLogitsProcessor - __call__ ## StoppingCriteria 可以使用[`StoppingCriteria`]来更改停止生成的时间(除了EOS token以外的方法)。请注意,这仅适用于我们的PyTorch实现。 [[autodoc]] StoppingCriteria - __call__ [[autodoc]] StoppingCriteriaList - __call__ [[autodoc]] MaxLengthCriteria - __call__ [[autodoc]] MaxTimeCriteria - __call__ ## Constraints 可以使用[`Constraint`]来强制生成结果包含输出中的特定tokens或序列。请注意,这仅适用于我们的PyTorch实现。 [[autodoc]] Constraint [[autodoc]] PhrasalConstraint [[autodoc]] DisjunctiveConstraint [[autodoc]] ConstraintListState ## BeamSearch [[autodoc]] BeamScorer - process - finalize [[autodoc]] BeamSearchScorer - process - finalize [[autodoc]] ConstrainedBeamSearchScorer - process - finalize ## Streamers [[autodoc]] TextStreamer [[autodoc]] TextIteratorStreamer
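下面是一个简化的示意(沿用本页开头示例中加载的 `openai-community/gpt2` 模型、分词器和 `inputs`,仅作演示,并非固定写法),展示如何通过 `LogitsProcessorList` 和 `TextStreamer` 自定义生成过程;`StoppingCriteriaList` 也可以用同样的方式通过 `stopping_criteria` 参数传入:

```python
from transformers import LogitsProcessorList, MinLengthLogitsProcessor, TextStreamer

# 约束生成序列的最小长度(15 只是示例值)
logits_processor = LogitsProcessorList(
    [MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id)]
)

# 生成过程中把 token 实时解码并打印到标准输出
streamer = TextStreamer(tokenizer)

outputs = model.generate(
    **inputs,
    max_new_tokens=30,
    logits_processor=logits_processor,
    streamer=streamer,
)
```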
transformers/docs/source/zh/internal/generation_utils.md/0
{ "file_path": "transformers/docs/source/zh/internal/generation_utils.md", "repo_id": "transformers", "token_count": 3447 }
385
<!--版权所有 2020 年 HuggingFace 团队。保留所有权利。 根据 Apache 许可证 2.0 版本许可,除非符合许可证的规定,否则您不得使用此文件。您可以在以下网址获取许可证的副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则依照许可证分发的软件是基于“原样”提供的,不附带任何明示或暗示的担保或条件。有关特定语言下权限的限制和限制,请参阅许可证。--> # 模型 基类 [`PreTrainedModel`]、[`TFPreTrainedModel`] 和 [`FlaxPreTrainedModel`] 实现了从本地文件或目录加载/保存模型的常用方法,或者从库上提供的预训练模型配置(从 HuggingFace 的 AWS S3 存储库下载)加载模型。 [`PreTrainedModel`] 和 [`TFPreTrainedModel`] 还实现了一些所有模型共有的方法: - 在向量词嵌入增加新词汇时调整输入标记(token)的大小 - 对模型的注意力头进行修剪。 其他的通用方法在 [`~modeling_utils.ModuleUtilsMixin`](用于 PyTorch 模型)和 [`~modeling_tf_utils.TFModuleUtilsMixin`](用于 TensorFlow 模型)中定义;文本生成方面的方法则定义在 [`~generation.GenerationMixin`](用于 PyTorch 模型)、[`~generation.TFGenerationMixin`](用于 TensorFlow 模型)和 [`~generation.FlaxGenerationMixin`](用于 Flax/JAX 模型)中。 ## PreTrainedModel [[autodoc]] PreTrainedModel - push_to_hub - all <a id='from_pretrained-torch-dtype'></a> ### 大模型加载 在 Transformers 4.20.0 中,[`~PreTrainedModel.from_pretrained`] 方法已重新设计,以适应使用 [Accelerate](https://huggingface.co/docs/accelerate/big_modeling) 加载大型模型的场景。这需要您使用的 Accelerate 和 PyTorch 版本满足: Accelerate >= 0.9.0, PyTorch >= 1.9.0。除了创建完整模型,然后在其中加载预训练权重(这会占用两倍于模型大小的内存空间,一个用于随机初始化模型,一个用于预训练权重),我们提供了一种选项,将模型创建为空壳,然后只有在加载预训练权重时才实例化其参数。 此外,如果内存不足以放下加载整个模型(目前仅适用于推理),您可以直接将模型放置在不同的设备上。使用 `device_map="auto"`,Accelerate 将确定将每一层放置在哪个设备上,以最大化使用最快的设备(GPU),并将其余部分卸载到 CPU,甚至硬盘上(如果您没有足够的 GPU 内存 或 CPU 内存)。即使模型分布在几个设备上,它也将像您通常期望的那样运行。 ```python from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` 您可以通过 `hf_device_map` 属性来查看模型是如何在设备上分割的: ```python t0pp.hf_device_map {'shared': 0, 'decoder.embed_tokens': 0, 'encoder': 0, 'decoder.block.0': 0, 'decoder.block.1': 1, 'decoder.block.2': 1, 'decoder.block.3': 1, 'decoder.block.4': 1, 'decoder.block.5': 1, 'decoder.block.6': 1, 'decoder.block.7': 1, 'decoder.block.8': 1, 'decoder.block.9': 1, 'decoder.block.10': 1, 'decoder.block.11': 1, 'decoder.block.12': 1, 'decoder.block.13': 1, 'decoder.block.14': 1, 'decoder.block.15': 1, 'decoder.block.16': 1, 'decoder.block.17': 1, 'decoder.block.18': 1, 'decoder.block.19': 1, 'decoder.block.20': 1, 'decoder.block.21': 1, 'decoder.block.22': 'cpu', 'decoder.block.23': 'cpu', 'decoder.final_layer_norm': 'cpu', 'decoder.dropout': 'cpu', 'lm_head': 'cpu'} ``` 您还可以按照相同的格式(一个层名称到设备的映射关系的字典)编写自己的设备映射规则。它应该将模型的所有参数映射到给定的设备上,如果该层的所有子模块都在同一设备上,您不必详细说明其中所有子模块的位置。例如,以下设备映射对于 T0pp 将正常工作(只要您有 GPU 内存): ```python device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} ``` 另一种减少模型内存影响的方法是以较低精度的 dtype(例如 `torch.float16`)实例化它,或者使用下面介绍的直接量化技术。 ### 模型实例化 dtype 在 PyTorch 下,模型通常以 `torch.float32` 格式实例化。如果尝试加载权重为 fp16 的模型,这可能会导致问题,因为它将需要两倍的内存。为了克服此限制,您可以使用 `dtype` 参数显式传递所需的 `dtype`: ```python model = T5ForConditionalGeneration.from_pretrained("t5", dtype=torch.float16) ``` 或者,如果您希望模型始终以最优的内存模式加载,则可以使用特殊值 `"auto"`,然后 `dtype` 将自动从模型的权重中推导出: ```python model = T5ForConditionalGeneration.from_pretrained("t5", dtype="auto") ``` 也可以通过以下方式告知从头开始实例化的模型要使用哪种 `dtype`: ```python config = T5Config.from_pretrained("t5") model = AutoModel.from_config(config) ``` 由于 PyTorch 的设计,此功能仅适用于浮点类型。 ## ModuleUtilsMixin [[autodoc]] modeling_utils.ModuleUtilsMixin TFPreTrainedModel [[autodoc]] TFPreTrainedModel - push_to_hub - all ## TFModelUtilsMixin [[autodoc]] modeling_tf_utils.TFModelUtilsMixin FlaxPreTrainedModel [[autodoc]] FlaxPreTrainedModel - push_to_hub - all ## 推送到 Hub [[autodoc]] utils.PushToHubMixin ## 分片检查点 [[autodoc]] modeling_utils.load_sharded_checkpoint
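作为"分片检查点"一节的补充,下面是一个简单的示意(模型、目录名 `./sharded_bert` 和 `max_shard_size` 的取值均为示例假设),展示如何以分片形式保存权重,再用 [`~modeling_utils.load_sharded_checkpoint`] 重新加载:

```python
from transformers import AutoModel
from transformers.modeling_utils import load_sharded_checkpoint

model = AutoModel.from_pretrained("google-bert/bert-base-uncased")

# 按约 200MB 的分片大小保存,会生成多个权重文件和一个索引文件
model.save_pretrained("./sharded_bert", max_shard_size="200MB")

# 将分片检查点加载回一个结构相同的模型实例
new_model = AutoModel.from_pretrained("google-bert/bert-base-uncased")
load_sharded_checkpoint(new_model, "./sharded_bert")
```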
transformers/docs/source/zh/main_classes/model.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/model.md", "repo_id": "transformers", "token_count": 3276 }
386
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用 torch.compile() 优化推理 本指南旨在为使用[`torch.compile()`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html)在[🤗 Transformers中的计算机视觉模型](https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending)中引入的推理速度提升提供一个基准。 ## torch.compile 的优势 根据模型和GPU的不同,`torch.compile()`在推理过程中可以提高多达30%的速度。要使用`torch.compile()`,只需安装2.0及以上版本的`torch`即可。 编译模型需要时间,因此如果您只需要编译一次模型而不是每次推理都编译,那么它非常有用。 要编译您选择的任何计算机视觉模型,请按照以下方式调用`torch.compile()`: ```diff from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained(MODEL_ID).to("cuda") + model = torch.compile(model) ``` `compile()` 提供了多种编译模式,它们在编译时间和推理开销上有所不同。`max-autotune` 比 `reduce-overhead` 需要更长的时间,但会得到更快的推理速度。默认模式在编译时最快,但在推理时间上与 `reduce-overhead` 相比效率较低。在本指南中,我们使用了默认模式。您可以在[这里](https://pytorch.org/get-started/pytorch-2.0/#user-experience)了解更多信息。 我们在 PyTorch 2.0.1 版本上使用不同的计算机视觉模型、任务、硬件类型和数据批量大小对 `torch.compile` 进行了基准测试。 ## 基准测试代码 以下是每个任务的基准测试代码。我们在推理之前”预热“GPU,并取300次推理的平均值,每次使用相同的图像。 ### 使用 ViT 进行图像分类 ```python import torch from PIL import Image import requests import numpy as np from transformers import AutoImageProcessor, AutoModelForImageClassification url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224").to("cuda") model = torch.compile(model) processed_input = processor(image, return_tensors='pt').to(device="cuda") with torch.no_grad(): _ = model(**processed_input) ``` #### 使用 DETR 进行目标检测 ```python from transformers import AutoImageProcessor, AutoModelForObjectDetection processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50") model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to("cuda") model = torch.compile(model) texts = ["a photo of a cat", "a photo of a dog"] inputs = processor(text=texts, images=image, return_tensors="pt").to("cuda") with torch.no_grad(): _ = model(**inputs) ``` #### 使用 Segformer 进行图像分割 ```python from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to("cuda") model = torch.compile(model) seg_inputs = processor(images=image, return_tensors="pt").to("cuda") with torch.no_grad(): _ = model(**seg_inputs) ``` 以下是我们进行基准测试的模型列表。 **图像分类** - [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) - [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) - 
[facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224) - [microsoft/resnet-50](https://huggingface.co/) **图像分割** - [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512) - [facebook/mask2former-swin-tiny-coco-panoptic](https://huggingface.co/facebook/mask2former-swin-tiny-coco-panoptic) - [facebook/maskformer-swin-base-ade](https://huggingface.co/facebook/maskformer-swin-base-ade) - [google/deeplabv3_mobilenet_v2_1.0_513](https://huggingface.co/google/deeplabv3_mobilenet_v2_1.0_513) **目标检测** - [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) - [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101) - [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) 下面是使用和不使用`torch.compile()`的推理持续时间可视化,以及每个模型在不同硬件和数据批量大小下的改进百分比。 <div class="flex"> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/a100_batch_comp.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_batch_comp.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/t4_batch_comp.png" /> </div> </div> <div class="flex"> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_duration.png" /> </div> <div> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/A100_1_percentage.png" /> </div> </div> ![Duration Comparison on V100 with Batch Size of 1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/v100_1_duration.png) ![Percentage Improvement on T4 with Batch Size of 4](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/torch_compile/T4_4_percentage.png) 下面可以找到每个模型使用和不使用`compile()`的推理时间(毫秒)。请注意,OwlViT在大批量大小下会导致内存溢出。 ### A100 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 9.325 | 7.584 | | Image Segmentation/Segformer | 11.759 | 10.500 | | Object Detection/OwlViT | 24.978 | 18.420 | | Image Classification/BeiT | 11.282 | 8.448 | | Object Detection/DETR | 34.619 | 19.040 | | Image Classification/ConvNeXT | 10.410 | 10.208 | | Image Classification/ResNet | 6.531 | 4.124 | | Image Segmentation/Mask2former | 60.188 | 49.117 | | Image Segmentation/Maskformer | 75.764 | 59.487 | | Image Segmentation/MobileNet | 8.583 | 3.974 | | Object Detection/Resnet-101 | 36.276 | 18.197 | | Object Detection/Conditional-DETR | 31.219 | 17.993 | ### A100 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 14.832 | 14.499 | | Image Segmentation/Segformer | 18.838 | 16.476 | | Image Classification/BeiT | 13.205 | 13.048 | | Object Detection/DETR | 48.657 | 32.418| | Image Classification/ConvNeXT | 22.940 | 21.631 | | Image Classification/ResNet | 6.657 | 4.268 | | Image Segmentation/Mask2former | 74.277 | 61.781 | | Image Segmentation/Maskformer | 180.700 | 159.116 | | Image Segmentation/MobileNet | 14.174 | 8.515 | | Object Detection/Resnet-101 | 68.101 | 44.998 | | Object Detection/Conditional-DETR | 56.470 | 35.552 | ### A100 (batch 
size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 40.944 | 40.010 | | Image Segmentation/Segformer | 37.005 | 31.144 | | Image Classification/BeiT | 41.854 | 41.048 | | Object Detection/DETR | 164.382 | 161.902 | | Image Classification/ConvNeXT | 82.258 | 75.561 | | Image Classification/ResNet | 7.018 | 5.024 | | Image Segmentation/Mask2former | 178.945 | 154.814 | | Image Segmentation/Maskformer | 638.570 | 579.826 | | Image Segmentation/MobileNet | 51.693 | 30.310 | | Object Detection/Resnet-101 | 232.887 | 155.021 | | Object Detection/Conditional-DETR | 180.491 | 124.032 | ### V100 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 10.495 | 6.00 | | Image Segmentation/Segformer | 13.321 | 5.862 | | Object Detection/OwlViT | 25.769 | 22.395 | | Image Classification/BeiT | 11.347 | 7.234 | | Object Detection/DETR | 33.951 | 19.388 | | Image Classification/ConvNeXT | 11.623 | 10.412 | | Image Classification/ResNet | 6.484 | 3.820 | | Image Segmentation/Mask2former | 64.640 | 49.873 | | Image Segmentation/Maskformer | 95.532 | 72.207 | | Image Segmentation/MobileNet | 9.217 | 4.753 | | Object Detection/Resnet-101 | 52.818 | 28.367 | | Object Detection/Conditional-DETR | 39.512 | 20.816 | ### V100 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 15.181 | 14.501 | | Image Segmentation/Segformer | 16.787 | 16.188 | | Image Classification/BeiT | 15.171 | 14.753 | | Object Detection/DETR | 88.529 | 64.195 | | Image Classification/ConvNeXT | 29.574 | 27.085 | | Image Classification/ResNet | 6.109 | 4.731 | | Image Segmentation/Mask2former | 90.402 | 76.926 | | Image Segmentation/Maskformer | 234.261 | 205.456 | | Image Segmentation/MobileNet | 24.623 | 14.816 | | Object Detection/Resnet-101 | 134.672 | 101.304 | | Object Detection/Conditional-DETR | 97.464 | 69.739 | ### V100 (batch size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 52.209 | 51.633 | | Image Segmentation/Segformer | 61.013 | 55.499 | | Image Classification/BeiT | 53.938 | 53.581 | | Object Detection/DETR | OOM | OOM | | Image Classification/ConvNeXT | 109.682 | 100.771 | | Image Classification/ResNet | 14.857 | 12.089 | | Image Segmentation/Mask2former | 249.605 | 222.801 | | Image Segmentation/Maskformer | 831.142 | 743.645 | | Image Segmentation/MobileNet | 93.129 | 55.365 | | Object Detection/Resnet-101 | 482.425 | 361.843 | | Object Detection/Conditional-DETR | 344.661 | 255.298 | ### T4 (batch size: 1) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 16.520 | 15.786 | | Image Segmentation/Segformer | 16.116 | 14.205 | | Object Detection/OwlViT | 53.634 | 51.105 | | Image Classification/BeiT | 16.464 | 15.710 | | Object Detection/DETR | 73.100 | 53.99 | | Image Classification/ConvNeXT | 32.932 | 30.845 | | Image Classification/ResNet | 6.031 | 4.321 | | Image Segmentation/Mask2former | 79.192 | 66.815 | | Image Segmentation/Maskformer | 200.026 | 188.268 | | Image Segmentation/MobileNet | 18.908 | 11.997 | | Object Detection/Resnet-101 | 106.622 | 82.566 | | Object Detection/Conditional-DETR | 77.594 | 56.984 | ### T4 (batch size: 4) | **Task/Model** | **torch 2.0 - <br>no 
compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 43.653 | 43.626 | | Image Segmentation/Segformer | 45.327 | 42.445 | | Image Classification/BeiT | 52.007 | 51.354 | | Object Detection/DETR | 277.850 | 268.003 | | Image Classification/ConvNeXT | 119.259 | 105.580 | | Image Classification/ResNet | 13.039 | 11.388 | | Image Segmentation/Mask2former | 201.540 | 184.670 | | Image Segmentation/Maskformer | 764.052 | 711.280 | | Image Segmentation/MobileNet | 74.289 | 48.677 | | Object Detection/Resnet-101 | 421.859 | 357.614 | | Object Detection/Conditional-DETR | 289.002 | 226.945 | ### T4 (batch size: 16) | **Task/Model** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:| | Image Classification/ViT | 163.914 | 160.907 | | Image Segmentation/Segformer | 192.412 | 163.620 | | Image Classification/BeiT | 188.978 | 187.976 | | Object Detection/DETR | OOM | OOM | | Image Classification/ConvNeXT | 422.886 | 388.078 | | Image Classification/ResNet | 44.114 | 37.604 | | Image Segmentation/Mask2former | 756.337 | 695.291 | | Image Segmentation/Maskformer | 2842.940 | 2656.88 | | Image Segmentation/MobileNet | 299.003 | 201.942 | | Object Detection/Resnet-101 | 1619.505 | 1262.758 | | Object Detection/Conditional-DETR | 1137.513 | 897.390| ## PyTorch Nightly 我们还在 PyTorch Nightly 版本(2.1.0dev)上进行了基准测试,可以在[这里](https://download.pytorch.org/whl/nightly/cu118)找到 Nightly 版本的安装包,并观察到了未编译和编译模型的延迟性能改善。 ### A100 | **Task/Model** | **Batch Size** | **torch 2.0 - no compile** | **torch 2.0 -<br> compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 12.462 | 6.954 | | Image Classification/BeiT | 4 | 14.109 | 12.851 | | Image Classification/BeiT | 16 | 42.179 | 42.147 | | Object Detection/DETR | Unbatched | 30.484 | 15.221 | | Object Detection/DETR | 4 | 46.816 | 30.942 | | Object Detection/DETR | 16 | 163.749 | 163.706 | ### T4 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 14.408 | 14.052 | | Image Classification/BeiT | 4 | 47.381 | 46.604 | | Image Classification/BeiT | 16 | 42.179 | 42.147 | | Object Detection/DETR | Unbatched | 68.382 | 53.481 | | Object Detection/DETR | 4 | 269.615 | 204.785 | | Object Detection/DETR | 16 | OOM | OOM | ### V100 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/BeiT | Unbatched | 13.477 | 7.926 | | Image Classification/BeiT | 4 | 15.103 | 14.378 | | Image Classification/BeiT | 16 | 52.517 | 51.691 | | Object Detection/DETR | Unbatched | 28.706 | 19.077 | | Object Detection/DETR | 4 | 88.402 | 62.949| | Object Detection/DETR | 16 | OOM | OOM | ## 降低开销 我们在 PyTorch Nightly 版本中为 A100 和 T4 进行了 `reduce-overhead` 编译模式的性能基准测试。 ### A100 | **Task/Model** | **Batch Size** | **torch 2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/ConvNeXT | Unbatched | 11.758 | 7.335 | | Image Classification/ConvNeXT | 4 | 23.171 | 21.490 | | Image Classification/ResNet | Unbatched | 7.435 | 3.801 | | Image Classification/ResNet | 4 | 7.261 | 2.187 | | Object Detection/Conditional-DETR | Unbatched | 32.823 | 11.627 | | Object Detection/Conditional-DETR | 4 | 50.622 | 33.831 | | Image Segmentation/MobileNet | Unbatched | 9.869 | 4.244 | | Image Segmentation/MobileNet | 4 | 14.385 | 7.946 | ### T4 | **Task/Model** | **Batch Size** | **torch 
2.0 - <br>no compile** | **torch 2.0 - <br>compile** | |:---:|:---:|:---:|:---:| | Image Classification/ConvNeXT | Unbatched | 32.137 | 31.84 | | Image Classification/ConvNeXT | 4 | 120.944 | 110.209 | | Image Classification/ResNet | Unbatched | 9.761 | 7.698 | | Image Classification/ResNet | 4 | 15.215 | 13.871 | | Object Detection/Conditional-DETR | Unbatched | 72.150 | 57.660 | | Object Detection/Conditional-DETR | 4 | 301.494 | 247.543 | | Image Segmentation/MobileNet | Unbatched | 22.266 | 19.339 | | Image Segmentation/MobileNet | 4 | 78.311 | 50.983 |
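作为参考,下面是一个使用 `reduce-overhead` 编译模式的简单示意(模型选择仅作演示,`image` 复用本页图像分类示例中加载的图片):编译模式通过 `mode` 参数指定,并且与基准测试一样,先进行几次前向传递"预热"以触发编译,之后的推理才能体现编译带来的加速。

```python
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50").to("cuda")

# 通过 mode 参数指定编译模式,这里使用上文基准测试所用的 "reduce-overhead"
model = torch.compile(model, mode="reduce-overhead")

inputs = processor(image, return_tensors="pt").to("cuda")

with torch.no_grad():
    # 预热:前几次调用会触发编译,不应计入计时
    for _ in range(3):
        _ = model(**inputs)
```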
transformers/docs/source/zh/perf_torch_compile.md/0
{ "file_path": "transformers/docs/source/zh/perf_torch_compile.md", "repo_id": "transformers", "token_count": 6785 }
387
<!-- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 导出为 TorchScript <Tip> 这是开始使用 TorchScript 进行实验的起点,我们仍在探索其在变量输入大小模型中的能力。 这是我们关注的焦点,我们将在即将发布的版本中深入分析,提供更多的代码示例、更灵活的实现以及比较 Python 代码与编译 TorchScript 的性能基准。 </Tip> 根据 [TorchScript 文档](https://pytorch.org/docs/stable/jit.html): > TorchScript 是从 PyTorch 代码创建可序列化和可优化的模型的一种方式。 有两个 PyTorch 模块:[JIT 和 TRACE](https://pytorch.org/docs/stable/jit.html)。 这两个模块允许开发人员将其模型导出到其他程序中重用,比如面向效率的 C++ 程序。 我们提供了一个接口,允许您将 🤗 Transformers 模型导出为 TorchScript, 以便在与基于 PyTorch 的 Python 程序不同的环境中重用。 本文解释如何使用 TorchScript 导出并使用我们的模型。 导出模型需要两个步骤: - 使用 `torchscript` 参数实例化模型 - 使用虚拟输入进行前向传递 这些必要条件意味着开发人员应该注意以下详细信息。 ## TorchScript 参数和绑定权重 `torchscript` 参数是必需的,因为大多数 🤗 Transformers 语言模型的 `Embedding` 层和 `Decoding` 层之间有绑定权重。TorchScript 不允许导出具有绑定权重的模型,因此必须事先解绑和克隆权重。 使用 `torchscript` 参数实例化的模型将其 `Embedding` 层和 `Decoding` 层分开, 这意味着它们不应该在后续进行训练。训练将导致这两层不同步,产生意外结果。 对于没有语言模型头部的模型,情况不同,因为这些模型没有绑定权重。 这些模型可以安全地导出而无需 `torchscript` 参数。 ## 虚拟输入和标准长度 虚拟输入用于模型的前向传递。当输入的值传播到各层时,PyTorch 会跟踪在每个张量上执行的不同操作。 然后使用记录的操作来创建模型的 *trace* 。 跟踪是相对于输入的维度创建的。因此,它受到虚拟输入的维度限制,对于任何其他序列长度或批量大小都不起作用。 当尝试使用不同大小时,会引发以下错误: ```text `The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2` ``` 我们建议使用至少与推断期间将馈送到模型的最大输入一样大的虚拟输入大小进行跟踪。 填充可以帮助填补缺失的值。然而,由于模型是使用更大的输入大小进行跟踪的,矩阵的维度也会很大,导致更多的计算。 在每个输入上执行的操作总数要仔细考虑,并在导出不同序列长度模型时密切关注性能。 ## 在 Python 中使用 TorchScript 本节演示了如何保存和加载模型以及如何使用 trace 进行推断。 ### 保存模型 要使用 TorchScript 导出 `BertModel`,请从 `BertConfig` 类实例化 `BertModel`, 然后将其保存到名为 `traced_bert.pt` 的磁盘文件中: ```python from transformers import BertModel, BertTokenizer, BertConfig import torch enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # 对输入文本分词 text = "[CLS] Who was Jim Henson ? 
[SEP] Jim Henson was a puppeteer [SEP]" tokenized_text = enc.tokenize(text) # 屏蔽一个输入 token masked_index = 8 tokenized_text[masked_index] = "[MASK]" indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] # 创建虚拟输入 tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) dummy_input = [tokens_tensor, segments_tensors] # 使用 torchscript 参数初始化模型 # 即使此模型没有 LM Head,也将参数设置为 True。 config = BertConfig( vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, torchscript=True, ) # 实例化模型 model = BertModel(config) # 模型需要处于评估模式 model.eval() # 如果您使用 *from_pretrained* 实例化模型,还可以轻松设置 TorchScript 参数 model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True) # 创建 trace traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) torch.jit.save(traced_model, "traced_bert.pt") ``` ### 加载模型 现在,您可以从磁盘加载先前保存的 `BertModel`、`traced_bert.pt`,并在先前初始化的 `dummy_input` 上使用: ```python loaded_model = torch.jit.load("traced_bert.pt") loaded_model.eval() all_encoder_layers, pooled_output = loaded_model(*dummy_input) ``` ### 使用 trace 模型进行推断 通过使用其 `__call__` dunder 方法使用 trace 模型进行推断: ```python traced_model(tokens_tensor, segments_tensors) ``` ## 使用 Neuron SDK 将 Hugging Face TorchScript 模型部署到 AWS AWS 引入了用于云端低成本、高性能机器学习推理的 [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) 实例系列。 Inf1 实例由 AWS Inferentia 芯片提供支持,这是一款专为深度学习推理工作负载而构建的定制硬件加速器。 [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) 是 Inferentia 的 SDK,支持对 transformers 模型进行跟踪和优化,以便在 Inf1 上部署。Neuron SDK 提供: 1. 简单易用的 API,只需更改一行代码即可为云端推理跟踪和优化 TorchScript 模型。 2. 针对[改进的性能成本](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/)的即插即用性能优化。 3. 
支持使用 [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) 或 [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html) 构建的 Hugging Face transformers 模型。 ### 影响 基于 [BERT(来自 Transformers 的双向编码器表示)](https://huggingface.co/docs/transformers/main/model_doc/bert)架构的 transformers 模型,或其变体,如 [distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) 和 [roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) 在 Inf1 上运行最佳, 可用于生成抽取式问答、序列分类和标记分类等任务。然而,文本生成任务仍可以适应在 Inf1 上运行, 如这篇 [AWS Neuron MarianMT 教程](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html)所述。 有关可以直接在 Inferentia 上转换的模型的更多信息,请参阅 Neuron 文档的[模型架构适配](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia)章节。 ### 依赖关系 使用 AWS Neuron 将模型转换为模型需要一个 [Neuron SDK 环境](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide), 它已经预先配置在 [AWS 深度学习 AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html)上。 ### 将模型转换为 AWS Neuron 使用与 [Python 中使用 TorchScript](torchscript#using-torchscript-in-python) 相同的代码来跟踪 `BertModel` 以将模型转换为 AWS NEURON。导入 `torch.neuron` 框架扩展以通过 Python API 访问 Neuron SDK 的组件: ```python from transformers import BertModel, BertTokenizer, BertConfig import torch import torch.neuron ``` 您只需要修改下面这一行: ```diff - torch.jit.trace(model, [tokens_tensor, segments_tensors]) + torch.neuron.trace(model, [token_tensor, segments_tensors]) ``` 这样就能使 Neuron SDK 跟踪模型并对其进行优化,以在 Inf1 实例上运行。 要了解有关 AWS Neuron SDK 功能、工具、示例教程和最新更新的更多信息, 请参阅 [AWS NeuronSDK 文档](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html)。
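结合上文"虚拟输入和标准长度"一节,下面是一个简单的示意(仅作演示:填充长度 14 与上文虚拟输入一致,`enc` 和 `loaded_model` 沿用前面保存/加载示例中的对象),展示如何把较短的输入填充到跟踪时的长度,再用已加载的 trace 模型进行推断:

```python
# 跟踪时虚拟输入的长度;新输入必须填充到同样的长度
traced_length = 14

short_text = "[CLS] Hello world [SEP]"
short_tokens = enc.convert_tokens_to_ids(enc.tokenize(short_text))

# 用 [PAD] token 把序列补齐到跟踪长度
pad_id = enc.convert_tokens_to_ids("[PAD]")
padded_tokens = short_tokens + [pad_id] * (traced_length - len(short_tokens))

padded_tokens_tensor = torch.tensor([padded_tokens])
padded_segments_tensors = torch.zeros_like(padded_tokens_tensor)

loaded_model(padded_tokens_tensor, padded_segments_tensors)
```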
transformers/docs/source/zh/torchscript.md/0
{ "file_path": "transformers/docs/source/zh/torchscript.md", "repo_id": "transformers", "token_count": 4763 }
388
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pretraining the library models for T5-like span-masked language modeling on a text file or a dataset. Here is the full list of checkpoints on the hub that can be pretrained by this script: https://huggingface.co/models?filter=t5 """ import json import logging import math import os import sys import time from dataclasses import asdict, dataclass, field # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from enum import Enum from itertools import chain from pathlib import Path from typing import Optional import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BatchEncoding, FlaxT5ForConditionalGeneration, HfArgumentParser, PreTrainedTokenizerBase, T5Config, is_tensorboard_available, set_seed, ) from transformers.models.t5.modeling_flax_t5 import shift_tokens_right from transformers.utils import send_example_telemetry MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." 
) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `hf auth login` (stored in `~/.huggingface`)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization and masking. Sequences longer than this" " will be truncated. Default to the max input length of the model." 
) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for span masked language modeling loss"} ) mean_noise_span_length: float = field( default=3.0, metadata={"help": "Mean span length of masked tokens"}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length): """This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ . Training parameters to avoid padding with random_spans_noise_mask. When training a model with random_spans_noise_mask, we would like to set the other training hyperparmeters in a way that avoids padding. This function helps us compute these hyperparameters. We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens, and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens. This function tells us the required number of tokens in the raw example (for split_tokens()) as well as the length of the encoded targets. Note that this function assumes the inputs and targets will have EOS appended and includes that in the reported length. Args: inputs_length: an integer - desired length of the tokenized inputs sequence noise_density: a float mean_noise_span_length: a float Returns: tokens_length: length of original text in tokens targets_length: an integer - length in tokens of encoded targets sequence """ def _tokens_length_to_inputs_length_targets_length(tokens_length): num_noise_tokens = int(round(tokens_length * noise_density)) num_nonnoise_tokens = tokens_length - num_noise_tokens num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length)) # inputs contain all nonnoise tokens, sentinels for all noise spans # and one EOS token. _input_length = num_nonnoise_tokens + num_noise_spans + 1 _output_length = num_noise_tokens + num_noise_spans + 1 return _input_length, _output_length tokens_length = inputs_length while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length: tokens_length += 1 inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(tokens_length) # minor hack to get the targets length to be equal to inputs length # which is more likely to have been set to a nice round number. if noise_density == 0.5 and targets_length > inputs_length: tokens_length -= 1 targets_length -= 1 return tokens_length, targets_length @flax.struct.dataclass class FlaxDataCollatorForT5MLM: """ Data collator used for T5 span-masked language modeling. It is made sure that after masking the inputs are of length `data_args.max_seq_length` and targets are also of fixed length. 
For more information on how T5 span-masked language modeling works, one can take a look at the `official paper <https://huggingface.co/papers/1910.10683>`__ or the `official code for preprocessing <https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/data/preprocessors.py>`__ . Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. noise_density (:obj:`float`): The probability with which to (randomly) mask tokens in the input. mean_noise_span_length (:obj:`float`): The average span length of the masked tokens. input_length (:obj:`int`): The expected input length after masking. target_length (:obj:`int`): The expected target length after masking. pad_token_id: (:obj:`int`): The pad token id of the model decoder_start_token_id: (:obj:`int): The decoder start token id of the model """ tokenizer: PreTrainedTokenizerBase noise_density: float mean_noise_span_length: float input_length: int target_length: int pad_token_id: int decoder_start_token_id: int def __call__(self, examples: list[dict[str, np.ndarray]]) -> BatchEncoding: # convert list to dict and tensorize input batch = BatchEncoding( {k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()} ) input_ids = batch["input_ids"] batch_size, expandend_input_length = input_ids.shape mask_indices = np.asarray([self.random_spans_noise_mask(expandend_input_length) for i in range(batch_size)]) labels_mask = ~mask_indices input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8)) labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8)) batch["input_ids"] = self.filter_input_ids(input_ids, input_ids_sentinel) batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel) if batch["input_ids"].shape[-1] != self.input_length: raise ValueError( f"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but" f" should be {self.input_length}." ) if batch["labels"].shape[-1] != self.target_length: raise ValueError( f"`labels` are incorrectly preprocessed. `labels` length is {batch['labels'].shape[-1]}, but should be" f" {self.target_length}." ) # to check that tokens are correctly preprocessed, one can run `self.tokenizer.batch_decode(input_ids)` and `self.tokenizer.batch_decode(labels)` here... batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.pad_token_id, self.decoder_start_token_id ) return batch def create_sentinel_ids(self, mask_indices): """ Sentinel ids creation given the indices that should be masked. The start indices of each mask are replaced by the sentinel ids in increasing order. Consecutive mask indices to be deleted are replaced with `-1`. """ start_indices = mask_indices - np.roll(mask_indices, 1, axis=-1) * mask_indices start_indices[:, 0] = mask_indices[:, 0] sentinel_ids = np.where(start_indices != 0, np.cumsum(start_indices, axis=-1), start_indices) sentinel_ids = np.where(sentinel_ids != 0, (len(self.tokenizer) - sentinel_ids), 0) sentinel_ids -= mask_indices - start_indices return sentinel_ids def filter_input_ids(self, input_ids, sentinel_ids): """ Puts sentinel mask on `input_ids` and fuse consecutive mask tokens into a single mask token by deleting. This will reduce the sequence length from `expanded_inputs_length` to `input_length`. 
""" batch_size = input_ids.shape[0] input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids) # input_ids tokens and sentinel tokens are >= 0, tokens < 0 are # masked tokens coming after sentinel tokens and should be removed input_ids = input_ids_full[input_ids_full >= 0].reshape((batch_size, -1)) input_ids = np.concatenate( [input_ids, np.full((batch_size, 1), self.tokenizer.eos_token_id, dtype=np.int32)], axis=-1 ) return input_ids def random_spans_noise_mask(self, length): """This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ . Noise mask consisting of random spans of noise tokens. The number of noise tokens and the number of noise spans and non-noise spans are determined deterministically as follows: num_noise_tokens = round(length * noise_density) num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length) Spans alternate between non-noise and noise, beginning with non-noise. Subject to the above restrictions, all masks are equally likely. Args: length: an int32 scalar (length of the incoming token sequence) noise_density: a float - approximate density of output mask mean_noise_span_length: a number Returns: a boolean tensor with shape [length] """ orig_length = length num_noise_tokens = int(np.round(length * self.noise_density)) num_nonnoise_tokens = length - num_noise_tokens # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens. num_noise_tokens = min(max(num_noise_tokens, 1), length - 1) # num_noise_tokens should be less than num_noise_tokens and num_nonnoise_tokens num_noise_spans = int(np.round(min(num_noise_tokens, num_nonnoise_tokens) / self.mean_noise_span_length)) # avoid degeneracy by ensuring positive number of noise spans num_noise_spans = max(num_noise_spans, 1) # pick the lengths of the noise spans and the non-noise spans def _random_segmentation(num_items, num_segments): """Partition a sequence of items randomly into non-empty segments. Args: num_items: an integer scalar > 0 num_segments: an integer scalar in [1, num_items] Returns: a Tensor with shape [num_segments] containing positive integers that add up to num_items """ mask_indices = np.arange(num_items - 1) < (num_segments - 1) np.random.shuffle(mask_indices) first_in_segment = np.pad(mask_indices, [[1, 0]]) segment_id = np.cumsum(first_in_segment) # count length of sub segments assuming that list is sorted _, segment_length = np.unique(segment_id, return_counts=True) return segment_length noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans) nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens, num_noise_spans) interleaved_span_lengths = np.reshape( np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2] ) span_starts = np.cumsum(interleaved_span_lengths)[:-1] span_start_indicator = np.zeros((length,), dtype=np.int8) span_start_indicator[span_starts] = True span_num = np.cumsum(span_start_indicator) is_noise = np.equal(span_num % 2, 1) return is_noise[:orig_length] def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray: """Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by the batch size and `drop_last` is `True`, the last incomplete batch is dropped. 
Else, it is returned.""" num_samples = len(samples_idx) if drop_last: samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size samples_idx = samples_idx.reshape((sections_split, batch_size)) else: sections_split = math.ceil(num_samples / batch_size) samples_idx = np.array_split(samples_idx, sections_split) return samples_idx def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_t5_mlm", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO, datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
        datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            num_proc=data_args.preprocessing_num_workers,
            trust_remote_code=data_args.trust_remote_code,
        )

        if "validation" not in datasets:
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
                token=model_args.token,
                num_proc=data_args.preprocessing_num_workers,
                trust_remote_code=data_args.trust_remote_code,
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
                token=model_args.token,
                num_proc=data_args.preprocessing_num_workers,
                trust_remote_code=data_args.trust_remote_code,
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            num_proc=data_args.preprocessing_num_workers,
        )

        if "validation" not in datasets:
            datasets["validation"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
                token=model_args.token,
                num_proc=data_args.preprocessing_num_workers,
            )
            datasets["train"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
                token=model_args.token,
                num_proc=data_args.preprocessing_num_workers,
            )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.

    # Load pretrained model and tokenizer

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer,
            token=model_args.token,
        )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer,
            token=model_args.token,
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.config_name:
        config = T5Config.from_pretrained(
            model_args.config_name,
            cache_dir=model_args.cache_dir,
            vocab_size=len(tokenizer),
            token=model_args.token,
        )
    elif model_args.model_name_or_path:
        config = T5Config.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
        )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
    # Since we make sure that all sequences are of the same length, no attention_mask is needed.
    def tokenize_function(examples):
        return tokenizer(examples[text_column_name], return_attention_mask=False)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
    # To ensure that the input length is `max_seq_length`, we need to increase the maximum length
    # according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.
    expanded_inputs_length, targets_length = compute_input_and_target_lengths(
        inputs_length=max_seq_length,
        noise_density=data_args.mlm_probability,
        mean_noise_span_length=data_args.mean_noise_span_length,
    )

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of expanded_inputs_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        if total_length >= expanded_inputs_length:
            total_length = (total_length // expanded_inputs_length) * expanded_inputs_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + expanded_inputs_length] for i in range(0, total_length, expanded_inputs_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
    # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
    # might be slower to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/process#map
    tokenized_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Enable tensorboard only on the master node
    has_tensorboard = is_tensorboard_available()
    if has_tensorboard and jax.process_index() == 0:
        try:
            from flax.metrics.tensorboard import SummaryWriter

            summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
        except ImportError as ie:
            has_tensorboard = False
            logger.warning(
                f"Unable to display metrics through TensorBoard because some packages are not installed: {ie}"
            )
    else:
        logger.warning(
            "Unable to display metrics through TensorBoard because the package is not installed: "
            "Please run pip install tensorboard to enable."
        )

    # Initialize our training
    rng = jax.random.PRNGKey(training_args.seed)
    dropout_rngs = jax.random.split(rng, jax.local_device_count())

    if model_args.model_name_or_path:
        model = FlaxT5ForConditionalGeneration.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            seed=training_args.seed,
            dtype=getattr(jnp, model_args.dtype),
            token=model_args.token,
        )
    else:
        config.vocab_size = len(tokenizer)
        model = FlaxT5ForConditionalGeneration(
            config,
            seed=training_args.seed,
            dtype=getattr(jnp, model_args.dtype),
        )

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = FlaxDataCollatorForT5MLM(
        tokenizer=tokenizer,
        noise_density=data_args.mlm_probability,
        mean_noise_span_length=data_args.mean_noise_span_length,
        input_length=max_seq_length,
        target_length=targets_length,
        pad_token_id=model.config.pad_token_id,
        decoder_start_token_id=model.config.decoder_start_token_id,
    )

    # Store some constant
    num_epochs = int(training_args.num_train_epochs)
    train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
    per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
    eval_batch_size = per_device_eval_batch_size * jax.device_count()

    num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs

    num_of_hosts = jax.process_count()
    current_host_idx = jax.process_index()

    # Create learning rate schedule
    warmup_fn = optax.linear_schedule(
        init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
    )
    decay_fn = optax.linear_schedule(
        init_value=training_args.learning_rate,
        end_value=0,
        transition_steps=num_train_steps - training_args.warmup_steps,
    )
    linear_decay_lr_schedule_fn = optax.join_schedules(
        schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
    )

    # We use Optax's "masking" functionality to not apply weight decay
    # to bias and LayerNorm scale parameters. decay_mask_fn returns a
    # mask boolean with the same structure as the parameters.
    # The mask is True for parameters that should be decayed.
    def decay_mask_fn(params):
        flat_params = traverse_util.flatten_dict(params)
        # find out all LayerNorm parameters
        layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
        layer_norm_named_params = {
            layer[-2:]
            for layer_norm_name in layer_norm_candidates
            for layer in flat_params
            if layer_norm_name in "".join(layer).lower()
        }
        flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    # create adam optimizer
    if training_args.adafactor:
        # We use the default parameters here to initialize adafactor,
        # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
        optimizer = optax.adafactor(
            learning_rate=linear_decay_lr_schedule_fn,
        )
    else:
        optimizer = optax.adamw(
            learning_rate=linear_decay_lr_schedule_fn,
            b1=training_args.adam_beta1,
            b2=training_args.adam_beta2,
            weight_decay=training_args.weight_decay,
            mask=decay_mask_fn,
        )

    # Setup train state
    state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)

    # Define gradient update step fn
    def train_step(state, batch, dropout_rng):
        dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)

        def loss_fn(params):
            labels = batch.pop("labels")

            logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]

            # compute loss
            loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

            return loss

        grad_fn = jax.value_and_grad(loss_fn)
        loss, grad = grad_fn(state.params)
        grad = jax.lax.pmean(grad, "batch")
        new_state = state.apply_gradients(grads=grad)

        metrics = jax.lax.pmean(
            {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
        )

        return new_state, metrics, new_dropout_rng

    # Create parallel version of the train step
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))

    # Define eval fn
    def eval_step(params, batch):
        labels = batch.pop("labels")

        logits = model(**batch, params=params, train=False)[0]

        # compute loss
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1]))

        # compute accuracy
        accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels)

        # summarize metrics
        metrics = {"loss": loss.mean(), "accuracy": accuracy.mean()}
        metrics = jax.lax.pmean(metrics, axis_name="batch")

        return metrics

    p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))

    # Replicate the train state on each device
    state = jax_utils.replicate(state)

    train_time = 0
    epochs = tqdm(range(num_epochs), desc="Epoch ... ", position=0)
    for epoch in epochs:
        # ======================== Training ================================
        train_start = time.time()
        train_metrics = []

        # Create sampling rng
        rng, input_rng = jax.random.split(rng)

        # Generate an epoch by shuffling sampling indices from the train dataset
        num_train_samples = len(tokenized_datasets["train"])
        # Avoid using jax.numpy here in case of TPU training
        train_samples_idx = np.random.permutation(np.arange(num_train_samples))
        train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)

        # Gather the indexes for creating the batch and do a training step
        for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
            samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
            model_inputs = data_collator(samples)

            local_host_model_inputs = {
                key: np.split(model_inputs.data[key], num_of_hosts, axis=0)[current_host_idx]
                for key, value in model_inputs.data.items()
            }

            # Model forward
            model_inputs = shard(local_host_model_inputs)
            state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
            train_metrics.append(train_metric)

            cur_step = epoch * (num_train_samples // train_batch_size) + step

            if cur_step % training_args.logging_steps == 0 and cur_step > 0:
                # Save metrics
                train_metric = jax_utils.unreplicate(train_metric)
                train_time += time.time() - train_start
                if has_tensorboard and jax.process_index() == 0:
                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)

                epochs.write(
                    f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate:"
                    f" {train_metric['learning_rate'].mean()})"
                )

                train_metrics = []

            if cur_step % training_args.eval_steps == 0 and cur_step > 0:
                # ======================== Evaluating ==============================
                num_eval_samples = len(tokenized_datasets["validation"])
                # Avoid using jax.numpy here in case of TPU training
                eval_samples_idx = np.arange(num_eval_samples)
                eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)

                eval_metrics = []
                for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
                    samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
                    model_inputs = data_collator(samples)

                    # Model forward
                    metrics = pad_shard_unpad(p_eval_step, static_return=True)(
                        state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
                    )
                    eval_metrics.append(metrics)

                # get eval metrics
                eval_metrics = get_metrics(eval_metrics)
                eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics)

                # Update progress bar
                epochs.write(f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})")

                # Save metrics
                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metrics, cur_step)

            if cur_step % training_args.save_steps == 0 and cur_step > 0:
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        api.upload_folder(
                            commit_message=f"Saving weights and logs of step {cur_step}",
                            folder_path=training_args.output_dir,
                            repo_id=repo_id,
                            repo_type="model",
                            token=training_args.hub_token,
                        )

    # Eval after training
    if training_args.do_eval:
        num_eval_samples = len(tokenized_datasets["validation"])
        # Avoid using jax.numpy here in case of TPU training
        eval_samples_idx = np.arange(num_eval_samples)
        eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)

        eval_metrics = []
        for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
            samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
            model_inputs = data_collator(samples)

            # Model forward
            metrics = pad_shard_unpad(p_eval_step, static_return=True)(
                state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
            )
            eval_metrics.append(metrics)

        # get eval metrics
        eval_metrics = get_metrics(eval_metrics)
        eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics)

        if jax.process_index() == 0:
            eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
            path = os.path.join(training_args.output_dir, "eval_results.json")
            with open(path, "w") as f:
                json.dump(eval_metrics, f, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()
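
Editor's note (not part of the original script): the training and evaluation loops above call a `generate_batch_splits` helper that is defined earlier in this file and is not visible in this excerpt. The sketch below is only an approximation of what such a helper is assumed to do: slice a (shuffled) index array into batch-sized chunks, optionally dropping the trailing incomplete batch. It is not the upstream definition.

```python
import numpy as np


def generate_batch_splits_sketch(samples_idx: np.ndarray, batch_size: int, drop_last: bool = True):
    """Illustrative only: split an array of sample indices into batch-sized chunks."""
    num_samples = len(samples_idx)
    if drop_last:
        # Trim the trailing remainder so every chunk holds exactly `batch_size` indices.
        samples_to_remove = num_samples % batch_size
        if samples_to_remove != 0:
            samples_idx = samples_idx[:-samples_to_remove]
        sections_split = num_samples // batch_size
        return np.split(samples_idx, sections_split)
    # Keep the smaller final batch (the evaluation loops pass drop_last=False).
    return np.array_split(samples_idx, int(np.ceil(num_samples / batch_size)))


if __name__ == "__main__":
    print(generate_batch_splits_sketch(np.arange(10), 4))                      # two batches of 4
    print(generate_batch_splits_sketch(np.arange(10), 4, drop_last=False))     # batches of 4, 3, 3
```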
transformers/examples/flax/language-modeling/run_t5_mlm_flax.py/0
{ "file_path": "transformers/examples/flax/language-modeling/run_t5_mlm_flax.py", "repo_id": "transformers", "token_count": 18771 }
389
<!---
Copyright 2021 The Google Flax Team Authors and HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Token classification examples

Fine-tuning the library models for token classification tasks such as Named Entity Recognition (NER), Part-of-speech tagging (POS) or phrase extraction (CHUNKS). The main script `run_flax_ner.py` leverages the 🤗 Datasets library. You can easily customize it to your needs if you need extra processing on your datasets.

It will either run on a dataset hosted on our hub or with your own text files for training and validation; you might just need to add some tweaks in the data preprocessing.

The following example fine-tunes BERT on CoNLL-2003:

```bash
python run_flax_ner.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name conll2003 \
  --max_seq_length 128 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --per_device_train_batch_size 4 \
  --output_dir ./bert-ner-conll2003 \
  --eval_steps 300 \
  --push_to_hub
```

Using the command above, the script will train for 3 epochs and run eval after each epoch. Metrics and hyperparameters are stored in TensorFlow event files in `--output_dir`. You can see the results by running `tensorboard` in that directory:

```bash
$ tensorboard --logdir .
```

or directly on the hub under *Training metrics*.

Sample metrics - [tfhub.dev](https://tensorboard.dev/experiment/u52qsBIpQSKEEXEJd2LVYA)
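
Editor's note (not part of the upstream README): once training finishes, the checkpoint in `--output_dir` (or the hub repo created by `--push_to_hub`) can typically be loaded back for inference. The sketch below is a rough illustration only; it assumes the output directory contains the tokenizer files saved alongside the model, and since this example writes Flax weights, loading them into PyTorch may require `from_flax=True`.

```python
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

# "./bert-ner-conll2003" matches the --output_dir used above; adjust if you changed it.
tokenizer = AutoTokenizer.from_pretrained("./bert-ner-conll2003")
model = AutoModelForTokenClassification.from_pretrained("./bert-ner-conll2003", from_flax=True)

ner = pipeline("token-classification", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
print(ner("Hugging Face is based in New York City."))
```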
transformers/examples/flax/token-classification/README.md/0
{ "file_path": "transformers/examples/flax/token-classification/README.md", "repo_id": "transformers", "token_count": 557 }
390
# Install example requirements
pip install -r ../requirements.txt

# Download glue data
python3 ../../utils/download_glue_data.py

export TASK=mrpc
export DATA_DIR=./glue_data/MRPC/

export MAX_LENGTH=128
export LEARNING_RATE=2e-5
export BERT_MODEL=bert-base-cased
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SEED=2
export OUTPUT_DIR_NAME=mrpc-pl-bert
export CURRENT_DIR=${PWD}
export OUTPUT_DIR=${CURRENT_DIR}/${OUTPUT_DIR_NAME}

# Make output directory if it doesn't exist
mkdir -p $OUTPUT_DIR

# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"

python3 run_glue.py --gpus 1 --data_dir $DATA_DIR \
  --task $TASK \
  --model_name_or_path $BERT_MODEL \
  --output_dir $OUTPUT_DIR \
  --max_seq_length $MAX_LENGTH \
  --learning_rate $LEARNING_RATE \
  --num_train_epochs $NUM_EPOCHS \
  --train_batch_size $BATCH_SIZE \
  --seed $SEED \
  --do_train \
  --do_predict
transformers/examples/legacy/pytorch-lightning/run_glue.sh/0
{ "file_path": "transformers/examples/legacy/pytorch-lightning/run_glue.sh", "repo_id": "transformers", "token_count": 360 }
391
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py
    Format of save_dir: train.source, train.target, val.source, val.target, test.source, test.target.

    Args:
        src_lang: <str> source language
        tgt_lang: <str> target language
        dataset: <str> wmt16, wmt17, etc. wmt16 is a good start as it's small. To get the full list run
            `import datasets; print([d.id for d in datasets.list_datasets() if "wmt" in d.id])`
        save_dir: <str>, where to save the datasets, defaults to f'{dataset}-{src_lang}-{tgt_lang}'

    Usage:
        >>> download_wmt_dataset('ro', 'en', dataset='wmt16') # saves to wmt16-ro-en
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")

    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds:
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
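
Editor's note (not part of the original file): `download_wmt_dataset` writes plain-text parallel files, one example per line (`train.source`/`train.target`, `val.source`/`val.target`, `test.source`/`test.target`). A rough sketch of how downstream code might read one of those pairs back is shown below; the directory name `wmt16-ro-en` is simply the default `f"{dataset}-{src_lang}-{tgt_lang}"` and is an assumption.

```python
from pathlib import Path


def read_parallel_split(save_dir: str, split: str = "train"):
    """Yield (source, target) line pairs from the files written by download_wmt_dataset."""
    src_lines = Path(save_dir).joinpath(f"{split}.source").read_text(encoding="utf-8").splitlines()
    tgt_lines = Path(save_dir).joinpath(f"{split}.target").read_text(encoding="utf-8").splitlines()
    yield from zip(src_lines, tgt_lines)


if __name__ == "__main__":
    # assumes the default output directory produced by download_wmt_dataset("ro", "en", dataset="wmt16")
    for src, tgt in read_parallel_split("wmt16-ro-en", split="val"):
        print(f"{src!r} -> {tgt!r}")
        break
```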
transformers/examples/legacy/seq2seq/download_wmt.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/download_wmt.py", "repo_id": "transformers", "token_count": 1018 }
392