text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgba(0,0,0,0.01); letter-spacing:2px; /* Give some extra separation between chars */ } .non-token{ /* White space and other things the tokenizer ignores*/ white-space: pre; letter-spacing:4px; border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more ovious*/ border-bottom:1px solid #A0A0A0; line-height: 1rem; height: calc(100% - 2px); } .token { white-space: pre; position:relative; color:black; letter-spacing:2px; } .annotation{ white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */ border-radius:4px; position:relative; width:fit-content; } .annotation:before { /*The before holds the text and the after holds the background*/ z-index:1000; /* Make sure this is above the background */ content:attr(data-label); /* The annotations label is on a data attribute */ color:white; position:absolute; font-size:1rem; text-align:center; font-weight:bold; top:1.75rem; line-height:0; left:0; width:100%; padding:0.5rem 0; /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ overflow: hidden; white-space: nowrap; text-overflow:ellipsis; } .annotation:after { content:attr(data-label); /* The content defines the width of the annotation*/ position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; /* Nast hack below: We set the annotations color in code because we don't know the colors at css time. 
But you can't pass a color as a data attribute to get it into the pseudo element (this thing) So to get around that, annotations have the color set on them with a style attribute and then we can get the color with currentColor. Annotations wrap tokens and tokens set the color back to black */ background-color: currentColor; } .annotation:hover::after, .annotation:hover::before{ /* When the user hovers over an annotation expand the label to display in full */ min-width: fit-content; } .annotation:hover{ /* Emphasize the annotation start end with a border on hover*/ border-color: currentColor; border: 2px solid; } .special-token:not(:empty){ /* A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) */ position:relative; } .special-token:empty::before{ /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ content:attr(data-stok); background:#202020; font-size:0.75rem; color:white; margin: 0 0.25rem; padding: 0.25rem; border-radius:4px } .special-token:not(:empty):before { /* Special tokens that have text (UNK) are displayed above the actual text*/ content:attr(data-stok); position:absolute; bottom:1.75rem; min-width:100%; width:100%; height:1rem; line-height:1rem; font-size:1rem; text-align:center; color:white; font-weight:bold; background:#202020; border-radius:10%; } /* We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations instead we apply even and odd class at generation time and color them that way */ .even-token{ background:#DCDCDC ; border: 1px solid #DCDCDC; } .odd-token{ background:#A0A0A0; border: 1px solid #A0A0A0; } .even-token.multi-token,.odd-token.multi-token{ background: repeating-linear-gradient( 45deg, transparent, transparent 1px, #ccc 1px, #ccc 1px ), /* on "bottom" */ linear-gradient( to bottom, #FFB6C1, #999 ); } .multi-token:hover::after { content:"This char has more than 1 token"; 
/* The content defines the width of the annotation*/ color:white; background-color: black; position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; }
tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css", "repo_id": "tokenizers", "token_count": 1806 }
338
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::exceptions::PyException; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::fixed_length::FixedLength; use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme}; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct PyPreTokenizer { pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))? .into_pyobject(py)? .into_any() .into(), PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner .as_ref() .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? 
{ PyPreTokenizerWrapper::Custom(_) => { Py::new(py, base)?.into_pyobject(py)?.into_any().into() } PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => Py::new(py, (PyWhitespace {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::Split(_) => Py::new(py, (PySplit {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))? .into_pyobject(py)? .into_any() .into() } PreTokenizerWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::Metaspace(_) => Py::new(py, (PyMetaspace {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))? .into_pyobject(py)? .into_any() .into() } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))? .into_pyobject(py)? .into_any() .into() } PreTokenizerWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))? .into_pyobject(py)? .into_any() .into() } PreTokenizerWrapper::Digits(_) => Py::new(py, (PyDigits {}, base))? .into_pyobject(py)? .into_any() .into(), PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))? .into_pyobject(py)? .into_any() .into() } PreTokenizerWrapper::FixedLength(_) => { Py::new(py, (PyFixedLength {}, base))? .into_pyobject(py)? 
.into_any() .into() } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {e}" )) })?; Ok(PyBytes::new(py, data.as_bytes()).into()) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&[u8]>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PreTokenizer: {e}" )) })?; self.pretok = unpickled; Ok(()) } Err(e) => Err(e), } } /// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to /// keep track of the pre-tokenization, and leverage the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. 
If you just want to see the result of /// the pre-tokenization of a raw string, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` /// /// Args: /// pretok (:class:`~tokenizers.PreTokenizedString): /// The pre-tokenized string on which to apply this /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[pyo3(text_signature = "(self, pretok)")] fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> { ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into() } /// Pre tokenize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the /// alignment, nor does it provide all the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` /// /// Args: /// sequence (:obj:`str`): /// A string to pre-tokeize /// /// Returns: /// :obj:`List[Tuple[str, Offsets]]`: /// A list of tuple with the pre-tokenized parts and their offsets #[pyo3(text_signature = "(self, sequence)")] fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> { let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s); ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?; Ok(pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, o, _)| (s.to_owned(), o)) .collect()) } fn __repr__(&self) -> PyResult<String> { crate::utils::serde_pyo3::repr(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } fn __str__(&self) -> PyResult<String> { crate::utils::serde_pyo3::to_string(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } } macro_rules! 
getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) = *single.read().expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer") { pretok.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer") { pretok.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer") { pretok.$name($value); } } }}; } /// ByteLevel PreTokenizer /// /// This pre-tokenizer takes care of replacing all bytes of the given string /// with a corresponding representation, as well as splitting into words. /// /// Args: /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. /// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Set this to :obj:`False` to prevent this `pre_tokenizer` from using /// the GPT2 specific regexp for spliting on whitespace. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space); } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } #[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex); } #[getter] fn get_trim_offsets(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, trim_offsets) } #[setter] fn set_trim_offsets(self_: PyRef<Self>, trim_offsets: bool) { setter!(self_, ByteLevel, trim_offsets, trim_offsets) } #[new] #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")] fn new( add_prefix_space: bool, use_regex: bool, _kwargs: Option<&Bound<'_, PyDict>>, ) -> (Self, PyPreTokenizer) { ( PyByteLevel {}, ByteLevel::default() .add_prefix_space(add_prefix_space) .use_regex(use_regex) .into(), ) } /// Returns the alphabet used by this PreTokenizer. /// /// Since the ByteLevel works as its name suggests, at the byte level, it /// encodes each byte value to a unique visible character. This means that there is a /// total of 256 different characters composing this alphabet. /// /// Returns: /// :obj:`List[str]`: A list of characters that compose the alphabet #[staticmethod] #[pyo3(text_signature = "()")] fn alphabet() -> Vec<String> { ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect() } } /// This pre-tokenizer splits on word boundaries according to the `\w+|[^\w\s]+` /// regex pattern. It splits on word characters or characters that aren't words or /// whitespaces (punctuation such as hyphens, apostrophes, commas, etc.). 
/// /// Example: /// Use the `Whitespace` function as shown below:: /// /// ```python /// from tokenizers.pre_tokenizers import Whitespace /// /// pre_tokenizer = Whitespace() /// text = "Hello, world! Let's try the Whitespace pre-tokenizer." /// pre_tokenizer.pre_tokenize_str(text) /// [('Hello', (0, 5)), /// (',', (5, 6)), /// ('world', (7, 12)), /// ('!', (12, 13)), /// ('Let', (14, 17)), /// ("'", (17, 18)), /// ('s', (18, 19)), /// ('try', (20, 23)), /// ('the', (24, 27)), /// ('Whitespace', (28, 38)), /// ('pre', (39, 42)), /// ('-', (42, 43)), /// ('tokenizer', (43, 52)), /// ('.', (52, 53))] /// ``` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")] pub struct PyWhitespace {} #[pymethods] impl PyWhitespace { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespace {}, Whitespace {}.into()) } } /// This pre-tokenizer simply splits on the whitespace. Works like `.split()` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")] pub struct PyWhitespaceSplit {} #[pymethods] impl PyWhitespaceSplit { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespaceSplit {}, WhitespaceSplit.into()) } } /// Split PreTokenizer /// /// This versatile pre-tokenizer splits using the provided pattern and /// according to the provided behavior. The pattern can be inverted by /// making use of the invert flag. /// /// Args: /// pattern (:obj:`str` or :class:`~tokenizers.Regex`): /// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`. /// If you want to use a regex pattern, it has to be wrapped around a `tokenizers.Regex`, /// otherwise we consider is as a string pattern. For example `pattern="|"` /// means you want to split on `|` (imagine a csv file for example), while /// `pattern=tokenizers.Regex("1|2")` means you split on either '1' or '2'. 
/// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// invert (:obj:`bool`, `optional`, defaults to :obj:`False`): /// Whether to invert the pattern. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")] pub struct PySplit {} #[pymethods] impl PySplit { #[new] #[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")] fn new( pattern: PyPattern, behavior: PySplitDelimiterBehavior, invert: bool, ) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PySplit {}, ToPyResult(Split::new(pattern, behavior.into(), invert)) .into_py()? .into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [" ", "removed"]) } #[getter] fn get_pattern(_self: PyRef<Self>) -> PyResult<()> { Err(PyException::new_err("Cannot get pattern")) } #[setter] fn set_pattern(_self: PyRef<Self>, _pattern: PyPattern) -> PyResult<()> { Err(PyException::new_err( "Cannot set pattern, please instantiate a new split pattern instead", )) } #[getter] fn get_behavior(self_: PyRef<Self>) -> String { getter!(self_, Split, behavior).to_string().to_lowercase() } #[setter] fn set_behavior(self_: PyRef<Self>, behavior: String) -> PyResult<()> { let behavior = match behavior.as_ref() { "removed" => SplitDelimiterBehavior::Removed, "isolated" => SplitDelimiterBehavior::Isolated, "merged_with_previous" => SplitDelimiterBehavior::MergedWithPrevious, "merged_with_next" => SplitDelimiterBehavior::MergedWithNext, "contiguous" => SplitDelimiterBehavior::Contiguous, _ => { return Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )) } }; setter!(self_, Split, behavior, behavior); Ok(()) } #[getter] fn get_invert(self_: 
PyRef<Self>) -> bool { getter!(self_, Split, invert) } #[setter] fn set_invert(self_: PyRef<Self>, invert: bool) { setter!(self_, Split, invert, invert) } } /// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` /// /// Args: /// delimiter: str: /// The delimiter char that will be used to split input #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")] pub struct PyCharDelimiterSplit {} #[pymethods] impl PyCharDelimiterSplit { #[getter] fn get_delimiter(self_: PyRef<Self>) -> String { getter!(self_, Delimiter, delimiter.to_string()) } #[setter] fn set_delimiter(self_: PyRef<Self>, delimiter: char) { setter!(self_, Delimiter, delimiter, delimiter); } #[new] #[pyo3(text_signature = None)] pub fn new(delimiter: char) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PyCharDelimiterSplit {}, CharDelimiterSplit::new(delimiter).into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [" "]) } } /// BertPreTokenizer /// /// This pre-tokenizer splits tokens on spaces, and also on punctuation. /// Each occurrence of a punctuation character will be treated separately. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")] pub struct PyBertPreTokenizer {} #[pymethods] impl PyBertPreTokenizer { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyBertPreTokenizer {}, BertPreTokenizer.into()) } } /// This pre-tokenizer simply splits on punctuation as individual characters. /// /// Args: /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. 
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } #[getter] fn get_behavior(self_: PyRef<Self>) -> String { getter!(self_, Punctuation, behavior) .to_string() .to_lowercase() } #[setter] fn set_behavior(self_: PyRef<Self>, behavior: String) -> PyResult<()> { let behavior = match behavior.as_ref() { "removed" => SplitDelimiterBehavior::Removed, "isolated" => SplitDelimiterBehavior::Isolated, "merged_with_previous" => SplitDelimiterBehavior::MergedWithPrevious, "merged_with_next" => SplitDelimiterBehavior::MergedWithNext, "contiguous" => SplitDelimiterBehavior::Contiguous, _ => { return Err(exceptions::PyValueError::new_err( "Wrong value for SplitDelimiterBehavior, expected one of: \ `removed, isolated, merged_with_previous, merged_with_next, contiguous`", )) } }; setter!(self_, Punctuation, behavior, behavior); Ok(()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = "(self, pretokenizers)")] fn new(pre_tokenizers: &Bound<'_, PyList>) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } 
PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [PyList::empty(py)]) } fn __getitem__(self_: PyRef<'_, Self>, py: Python<'_>, index: usize) -> PyResult<Py<PyAny>> { match &self_.as_ref().pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => match inner.get(index) { Some(item) => PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Single(item.clone())) .get_as_subtype(py), _ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "Index not found", )), }, _ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "This processor is not a Sequence, it does not support __getitem__", )), } } fn __setitem__(self_: PyRef<'_, Self>, index: usize, value: Bound<'_, PyAny>) -> PyResult<()> { let pretok: PyPreTokenizer = value.extract()?; let PyPreTokenizerTypeWrapper::Single(pretok) = pretok.pretok else { return Err(PyException::new_err( "pre tokenizer should not be a sequence", )); }; match &self_.as_ref().pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => match inner.get(index) { Some(item) => { *item .write() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? = (*pretok .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))?) 
.clone(); } _ => { return Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "Index not found", )) } }, PyPreTokenizerTypeWrapper::Single(_) => { return Err(PyException::new_err("pre tokenizer is not a sequence")) } }; Ok(()) } } pub(crate) fn from_string(string: String) -> Result<PrependScheme, PyErr> { let scheme = match string.as_str() { "first" => PrependScheme::First, "never" => PrependScheme::Never, "always" => PrependScheme::Always, _ => { return Err(exceptions::PyValueError::new_err(format!( "{string} is an unknown variant, should be one of ['first', 'never', 'always']" ))); } }; Ok(scheme) } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`โ–`): /// The replacement character. Must be exactly one character. By default we /// use the `โ–` (U+2581) meta symbol (Same as in SentencePiece). /// /// prepend_scheme (:obj:`str`, `optional`, defaults to :obj:`"always"`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. /// Choices: "always", "never", "first". First means the space is only added on the first /// token (relevant when special tokens are used or other pre_tokenizer are used). 
/// #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")] pub struct PyMetaspace {} #[pymethods] impl PyMetaspace { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: char) { setter!(self_, Metaspace, @set_replacement, replacement); } #[getter] fn get_split(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, get_split()) } #[setter] fn set_split(self_: PyRef<Self>, split: bool) { setter!(self_, Metaspace, @set_split, split); } #[getter] fn get_prepend_scheme(self_: PyRef<Self>) -> String { // Assuming Metaspace has a method to get the prepend_scheme as a string getter!(self_, Metaspace, get_prepend_scheme()).to_string() } #[setter] fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> { let scheme = from_string(prepend_scheme)?; setter!(self_, Metaspace, @set_prepend_scheme, scheme); Ok(()) } #[new] #[pyo3(signature = (replacement = 'โ–', prepend_scheme=String::from("always"), split=true), text_signature = "(self, replacement=\"_\", prepend_scheme=\"always\", split=True)")] fn new( replacement: char, prepend_scheme: String, split: bool, ) -> PyResult<(Self, PyPreTokenizer)> { // Create a new Metaspace instance let prepend_scheme = from_string(prepend_scheme)?; let new_instance: Metaspace = Metaspace::new(replacement, prepend_scheme, split); Ok((PyMetaspace {}, new_instance.into())) } } /// This pre-tokenizer simply splits using the digits in separate tokens /// /// Args: /// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): /// If set to True, digits will each be separated as follows:: /// /// "Call 123 please" -> "Call ", "1", "2", "3", " please" /// /// If set to False, digits will grouped as follows:: /// /// "Call 123 please" -> "Call ", "123", " please" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = 
"Digits")] pub struct PyDigits {} #[pymethods] impl PyDigits { #[getter] fn get_individual_digits(self_: PyRef<Self>) -> bool { getter!(self_, Digits, individual_digits) } #[setter] fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) { setter!(self_, Digits, individual_digits, individual_digits); } #[new] #[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")] fn new(individual_digits: bool) -> (Self, PyPreTokenizer) { (PyDigits {}, Digits::new(individual_digits).into()) } } /// This pre-tokenizer splits the text into fixed length chunks as used /// [here](https://www.biorxiv.org/content/10.1101/2023.01.11.523679v1.full) /// /// Args: /// length (:obj:`int`, `optional`, defaults to :obj:`5`): /// The length of the chunks to split the text into. /// /// Strings are split on the character level rather than the byte level to avoid /// splitting unicode characters consisting of multiple bytes. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "FixedLength")] pub struct PyFixedLength {} #[pymethods] impl PyFixedLength { #[getter] fn get_length(self_: PyRef<Self>) -> usize { getter!(self_, FixedLength, length) } #[setter] fn set_length(self_: PyRef<Self>, length: usize) { setter!(self_, FixedLength, length, length); } #[new] #[pyo3(signature = (length = 5), text_signature = "(self, length=5)")] fn new(length: usize) -> (Self, PyPreTokenizer) { (PyFixedLength {}, FixedLength::new(length).into()) } } /// This pre-tokenizer splits on characters that belong to different language family /// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt /// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. /// This mimicks SentencePiece Unigram implementation. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.bind(py); py_pretok.call_method("pre_tokenize", (pretok.get().clone(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl<'de> Deserialize<'de> for PyPreTokenizerTypeWrapper { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: 
Deserializer<'de>, { let wrapper = PreTokenizerWrapper::deserialize(deserializer)?; let py_wrapper: PyPreTokenizerWrapper = wrapper.into(); Ok(py_wrapper.into()) } } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { let pretok = pretok.into(); match pretok { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Sequence(seq)) => { PyPreTokenizerTypeWrapper::Sequence( seq.into_iter() .map(|e| Arc::new(RwLock::new(PyPreTokenizerWrapper::Wrapped(e.clone())))) .collect(), ) } _ => PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok))), } } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? .pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner.iter().try_for_each(|n| { n.read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? 
.pre_tokenize(pretok) }), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; m.add_class::<PyFixedLength>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.bind(py).get_type().qualname().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ 
=> panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp) .unwrap() .into_pyobject(py) .unwrap() .into_any() .into(); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
tokenizers/bindings/python/src/pre_tokenizers.rs/0
{ "file_path": "tokenizers/bindings/python/src/pre_tokenizers.rs", "repo_id": "tokenizers", "token_count": 18244 }
339
import pytest from tokenizers import BertWordPieceTokenizer from ..utils import bert_files, data_dir class TestEncoding: @pytest.fixture(scope="class") def encodings(self, bert_files): tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"]) single_encoding = tokenizer.encode("I love HuggingFace") pair_encoding = tokenizer.encode("I love HuggingFace", "Do you?") return single_encoding, pair_encoding def test_sequence_ids(self, encodings): single, pair = encodings assert single.sequence_ids == [None, 0, 0, 0, 0, None] assert pair.sequence_ids == [None, 0, 0, 0, 0, None, 1, 1, 1, None] def test_n_sequences(self, encodings): single, pair = encodings assert single.n_sequences == 1 assert pair.n_sequences == 2 def test_word_to_tokens(self, encodings): single, pair = encodings assert single.tokens == ["[CLS]", "i", "love", "hugging", "##face", "[SEP]"] assert single.word_to_tokens(0) == (1, 2) assert pair.tokens == [ "[CLS]", "i", "love", "hugging", "##face", "[SEP]", "do", "you", "?", "[SEP]", ] assert pair.word_to_tokens(0) == (1, 2) assert pair.word_to_tokens(0, 0) == (1, 2) assert pair.word_to_tokens(6, 0) == None assert pair.word_to_tokens(0, 1) == (6, 7) def test_word_to_chars(self, encodings): single, pair = encodings assert single.word_to_chars(2) == (7, 18) assert pair.word_to_chars(2) == (7, 18) assert pair.word_to_chars(2, 0) == (7, 18) assert pair.word_to_chars(2, 1) == (6, 7) def test_token_to_sequence(self, encodings): single, pair = encodings assert single.token_to_sequence(2) == 0 assert pair.token_to_sequence(2) == 0 assert pair.token_to_sequence(0) == None assert pair.token_to_sequence(5) == None assert pair.token_to_sequence(6) == 1 assert pair.token_to_sequence(8) == 1 assert pair.token_to_sequence(9) == None assert pair.token_to_sequence(1200) == None def test_token_to_chars(self, encodings): single, pair = encodings assert single.token_to_chars(0) == None assert single.token_to_chars(2) == (2, 6) assert pair.token_to_chars(2) == (2, 6) 
assert pair.token_to_chars(5) == None assert pair.token_to_chars(6) == (0, 2) def test_token_to_word(self, encodings): single, pair = encodings assert single.token_to_word(0) == None assert single.token_to_word(1) == 0 assert single.token_to_word(4) == 2 assert pair.token_to_word(1) == 0 assert pair.token_to_word(4) == 2 assert pair.token_to_word(5) == None assert pair.token_to_word(6) == 0 assert pair.token_to_word(7) == 1 def test_char_to_token(self, encodings): single, pair = encodings assert single.char_to_token(0) == 1 assert pair.char_to_token(0) == 1 assert pair.char_to_token(0, 0) == 1 assert pair.char_to_token(1, 0) == None assert pair.char_to_token(0, 1) == 6 assert pair.char_to_token(2, 1) == None def test_char_to_word(self, encodings): single, pair = encodings assert single.char_to_word(0) == 0 assert single.char_to_word(1) == None assert pair.char_to_word(2) == 1 assert pair.char_to_word(2, 0) == 1 assert pair.char_to_word(2, 1) == None assert pair.char_to_word(3, 1) == 1 def test_truncation(self, encodings): single, _ = encodings single.truncate(2, 1, "right") assert single.tokens == ["[CLS]", "i"] assert single.overflowing[0].tokens == ["i", "love"] def test_invalid_truncate_direction(self, encodings): single, _ = encodings with pytest.raises(ValueError) as excinfo: single.truncate(2, 1, "not_a_direction") assert "Invalid truncation direction value : not_a_direction" == str(excinfo.value)
tokenizers/bindings/python/tests/bindings/test_encoding.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_encoding.py", "repo_id": "tokenizers", "token_count": 1991 }
340
import pytest from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer class TestSentencePieceBPE: def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = SentencePieceBPETokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["โ–A", "โ–sentence"] class TestSentencePieceUnigram: def test_train(self, tmpdir): p = tmpdir.mkdir("tmpdir").join("file.txt") p.write("A first sentence\nAnother sentence\nAnd a last one") tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(files=str(p), show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["โ–A", "โ–", "s", "en", "t", "en", "c", "e"] with pytest.raises(Exception) as excinfo: _ = tokenizer.encode("A sentence ๐Ÿค—") assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing" def test_train_with_unk_token(self, tmpdir): p = tmpdir.mkdir("tmpdir").join("file.txt") p.write("A first sentence\nAnother sentence\nAnd a last one") tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>") output = tokenizer.encode("A sentence ๐Ÿค—") assert output.ids[-1] == 0 assert output.tokens == ["โ–A", "โ–", "s", "en", "t", "en", "c", "e", "โ–", "๐Ÿค—"] def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = SentencePieceUnigramTokenizer() tokenizer.train_from_iterator(text, show_progress=False) output = tokenizer.encode("A sentence") assert output.tokens == ["โ–A", "โ–", "s", "en", "t", "en", "c", "e"] with pytest.raises(Exception) as excinfo: _ = tokenizer.encode("A sentence ๐Ÿค—") assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing" def test_train_from_iterator_with_unk_token(self): text = ["A first sentence", "Another sentence", "And a last one"] 
tokenizer = SentencePieceUnigramTokenizer() tokenizer.train_from_iterator( text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>" ) output = tokenizer.encode("A sentence ๐Ÿค—") assert output.ids[-1] == 0 assert output.tokens == ["โ–A", "โ–", "s", "en", "t", "en", "c", "e", "โ–", "๐Ÿค—"]
tokenizers/bindings/python/tests/implementations/test_sentencepiece.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_sentencepiece.py", "repo_id": "tokenizers", "token_count": 1118 }
341
# Trainers <tokenizerslangcontent> <python> ## BpeTrainer [[autodoc]] tokenizers.trainers.BpeTrainer ## UnigramTrainer [[autodoc]] tokenizers.trainers.UnigramTrainer ## WordLevelTrainer [[autodoc]] tokenizers.trainers.WordLevelTrainer ## WordPieceTrainer [[autodoc]] tokenizers.trainers.WordPieceTrainer </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/trainers.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/trainers.mdx", "repo_id": "tokenizers", "token_count": 183 }
342
/* Our DOM objects */ /* Version control */ .selectors { margin-bottom: 10px; } .dropdown-button { display: inline-block; width: 50%; background-color: #6670FF; color: white; border: none; padding: 5px; font-size: 15px; cursor: pointer; } .dropdown-button:hover, .dropdown-button:focus, .dropdown-button.active { background-color: #A6B0FF; } .dropdown-button.active { background-color: #7988FF; } .menu-dropdown { display: none; background-color: #7988FF; min-width: 160px; overflow: auto; font-size: 15px; padding: 10px 0; } .menu-dropdown a { color: white; padding: 3px 4px; text-decoration: none; display: block; } .menu-dropdown a:hover { background-color: #A6B0FF; } .dropdown-link.active { background-color: #A6B0FF; } .show { display: block; } /* The literal code blocks */ .rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { color: #6670FF; } /* To keep the logo centered */ .wy-side-scroll { width: auto; font-size: 20px; } /* The div that holds the Hugging Face logo */ .HuggingFaceDiv { width: 100% } /* The research field on top of the toc tree */ .wy-side-nav-search{ padding-top: 0; background-color: #6670FF; } /* The toc tree */ .wy-nav-side{ background-color: #6670FF; padding-bottom: 0; } /* The section headers in the toc tree */ .wy-menu-vertical p.caption{ background-color: #4d59ff; line-height: 40px; } /* The selected items in the toc tree */ .wy-menu-vertical li.current{ background-color: #A6B0FF; } /* When a list item that does belong to the selected block from the toc tree is hovered */ .wy-menu-vertical li.current a:hover{ background-color: #B6C0FF; } /* When a list item that does NOT belong to the selected block from the toc tree is hovered. 
*/ .wy-menu-vertical li a:hover{ background-color: #A7AFFB; } /* The text items on the toc tree */ .wy-menu-vertical a { color: #FFFFDD; font-family: Calibre-Light, sans-serif; } .wy-menu-vertical header, .wy-menu-vertical p.caption{ color: white; font-family: Calibre-Light, sans-serif; } /* The color inside the selected toc tree block */ .wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a { color: black; } /* Inside the depth-2 selected toc tree block */ .wy-menu-vertical li.toctree-l2.current>a { background-color: #B6C0FF } .wy-menu-vertical li.toctree-l2.current li.toctree-l3>a { background-color: #C6D0FF } /* Inside the depth-3 selected toc tree block */ .wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{ background-color: #D6E0FF } /* Inside code snippets */ .rst-content dl:not(.docutils) dt{ font-size: 15px; } /* Links */ a { color: #6670FF; } /* Content bars */ .rst-content dl:not(.docutils) dt { background-color: rgba(251, 141, 104, 0.1); border-right: solid 2px #FB8D68; border-left: solid 2px #FB8D68; color: #FB8D68; font-family: Calibre-Light, sans-serif; border-top: none; font-style: normal !important; } /* Expand button */ .wy-menu-vertical li.toctree-l2 span.toctree-expand, .wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand, .wy-menu-vertical li.toctree-l3 span.toctree-expand{ color: black; } /* Max window size */ .wy-nav-content{ max-width: 1200px; } /* Mobile header */ .wy-nav-top{ background-color: #6670FF; } /* Source spans */ .rst-content .viewcode-link, .rst-content .viewcode-back{ color: #6670FF; font-size: 110%; letter-spacing: 2px; text-transform: uppercase; } /* It would be better for table to be visible without horizontal scrolling */ .wy-table-responsive table td, .wy-table-responsive table th{ white-space: normal; } .footer { margin-top: 20px; } .footer__Social { display: flex; flex-direction: row; } .footer__CustomImage { margin: 2px 
5px 0 0; } /* class and method names in doc */ .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{ font-family: Calibre, sans-serif; font-size: 20px !important; } /* class name in doc*/ .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{ margin-right: 10px; font-family: Calibre-Medium, sans-serif; } /* Method and class parameters */ .sig-param{ line-height: 23px; } /* Class introduction "class" string at beginning */ .rst-content dl:not(.docutils) .property{ font-size: 18px; color: black; } /* FONTS */ body{ font-family: Calibre, sans-serif; font-size: 16px; } h1 { font-family: Calibre-Thin, sans-serif; font-size: 70px; } h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{ font-family: Calibre-Medium, sans-serif; } @font-face { font-family: Calibre-Medium; src: url(./Calibre-Medium.otf); font-weight:400; } @font-face { font-family: Calibre; src: url(./Calibre-Regular.otf); font-weight:400; } @font-face { font-family: Calibre-Light; src: url(./Calibre-Light.ttf); font-weight:400; } @font-face { font-family: Calibre-Thin; src: url(./Calibre-Thin.otf); font-weight:400; } /** * Nav Links to other parts of huggingface.co */ div.hf-menu { position: absolute; top: 0; right: 0; padding-top: 20px; padding-right: 20px; z-index: 1000; } div.hf-menu a { font-size: 14px; letter-spacing: 0.3px; text-transform: uppercase; color: white; -webkit-font-smoothing: antialiased; background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%); padding: 10px 16px 6px 16px; border-radius: 3px; margin-left: 12px; position: relative; } div.hf-menu a:active { top: 1px; } @media (min-width: 768px) and (max-width: 1860px) { .wy-breadcrumbs { margin-top: 32px; } } @media 
(max-width: 768px) { div.hf-menu { display: none; } }
tokenizers/docs/source/_static/css/huggingface.css/0
{ "file_path": "tokenizers/docs/source/_static/css/huggingface.css", "repo_id": "tokenizers", "token_count": 2708 }
343
Training from memory ---------------------------------------------------------------------------------------------------- In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files, but we can actually use any Python Iterator. In this section we'll see a few different ways of training our tokenizer. For all the examples listed below, we'll use the same :class:`~tokenizers.Tokenizer` and :class:`~tokenizers.trainers.Trainer`, built as following: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START init_tokenizer_trainer :end-before: END init_tokenizer_trainer :dedent: 8 This tokenizer is based on the :class:`~tokenizers.models.Unigram` model. It takes care of normalizing the input using the NFKC Unicode normalization method, and uses a :class:`~tokenizers.pre_tokenizers.ByteLevel` pre-tokenizer with the corresponding decoder. For more information on the components used here, you can check `here <components>`__ The most basic way ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As you probably guessed already, the easiest way to train our tokenizer is by using a :obj:`List`: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START train_basic :end-before: END train_basic :dedent: 8 Easy, right? You can use anything working as an iterator here, be it a :obj:`List`, :obj:`Tuple`, or a :obj:`np.Array`. Anything works as long as it provides strings. Using the ๐Ÿค— Datasets library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An awesome way to access one of the many datasets that exist out there is by using the ๐Ÿค— Datasets library. For more information about it, you should check `the official documentation here <https://huggingface.co/docs/datasets/>`__. 
Let's start by loading our dataset: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START load_dataset :end-before: END load_dataset :dedent: 8 The next step is to build an iterator over this dataset. The easiest way to do this is probably by using a generator: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START def_batch_iterator :end-before: END def_batch_iterator :dedent: 8 As you can see here, for improved efficiency we can actually provide a batch of examples used to train, instead of iterating over them one by one. By doing so, we can expect performances very similar to those we got while training directly from files. With our iterator ready, we just need to launch the training. In order to improve the look of our progress bars, we can specify the total length of the dataset: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START train_datasets :end-before: END train_datasets :dedent: 8 And that's it! Using gzip files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since gzip files in Python can be used as iterators, it is extremely simple to train on such files: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START single_gzip :end-before: END single_gzip :dedent: 8 Now if we wanted to train from multiple gzip files, it wouldn't be much harder: .. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py :language: python :start-after: START multi_gzip :end-before: END multi_gzip :dedent: 8 And voilร !
tokenizers/docs/source/tutorials/python/training_from_memory.rst/0
{ "file_path": "tokenizers/docs/source/tutorials/python/training_from_memory.rst", "repo_id": "tokenizers", "token_count": 1149 }
344
[package] name = "unstable_wasm" version = "0.1.0" authors = ["Nicolas Patry"] edition = "2018" [lib] crate-type = ["cdylib", "rlib"] [features] default = ["console_error_panic_hook"] [dependencies] wasm-bindgen = "0.2.63" # The `console_error_panic_hook` crate provides better debugging of panics by # logging them with `console.error`. This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for # code size when deploying. console_error_panic_hook = { version = "0.1.6", optional = true } # `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size # compared to the default allocator's ~10K. It is slower than the default # allocator, however. # # Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now. wee_alloc = { version = "0.4.5", optional = true } tokenizers = { path = "../../", default-features=false, features = ["unstable_wasm"]} [dev-dependencies] wasm-bindgen-test = "0.3.13" [profile.release] # Tell `rustc` to optimize for small code size. opt-level = "s"
tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml", "repo_id": "tokenizers", "token_count": 364 }
345
const CopyWebpackPlugin = require("copy-webpack-plugin"); const path = require('path'); module.exports = { entry: "./bootstrap.js", output: { path: path.resolve(__dirname, "dist"), filename: "bootstrap.js", }, mode: "development", plugins: [ new CopyWebpackPlugin(['index.html']) ], };
tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js", "repo_id": "tokenizers", "token_count": 114 }
346
//! Popular tokenizer models. pub mod bpe; pub mod unigram; pub mod wordlevel; pub mod wordpiece; use ahash::AHashMap; use std::collections::HashMap; use std::path::{Path, PathBuf}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::models::bpe::{BpeTrainer, BPE}; use crate::models::unigram::{Unigram, UnigramTrainer}; use crate::models::wordlevel::{WordLevel, WordLevelTrainer}; use crate::models::wordpiece::{WordPiece, WordPieceTrainer}; use crate::{AddedToken, Model, Result, Token, Trainer}; /// Wraps a vocab mapping (ID -> token) to a struct that will be serialized in order /// of token ID, smallest to largest. struct OrderedVocabIter<'a> { vocab_r: &'a AHashMap<u32, String>, } impl<'a> OrderedVocabIter<'a> { fn new(vocab_r: &'a AHashMap<u32, String>) -> Self { Self { vocab_r } } } impl Serialize for OrderedVocabIter<'_> { fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { // There could be holes so max + 1 is more correct than vocab_r.len() let mut holes = vec![]; let result = if let Some(max) = self.vocab_r.keys().max() { let iter = (0..*max + 1).filter_map(|i| { if let Some(token) = self.vocab_r.get(&i) { Some((token, i)) } else { holes.push(i); None } }); serializer.collect_map(iter) } else { serializer.collect_map(std::iter::empty::<(&str, u32)>()) }; if !holes.is_empty() { warn!("The OrderedVocab you are attempting to save contains holes for indices {holes:?}, your vocabulary could be corrupted !"); println!("The OrderedVocab you are attempting to save contains holes for indices {holes:?}, your vocabulary could be corrupted !"); } result } } #[derive(Serialize, Debug, PartialEq, Clone)] #[serde(untagged)] pub enum ModelWrapper { BPE(BPE), // WordPiece must stay before WordLevel here for deserialization (for retrocompatibility // with the versions not including the "type"), since WordLevel is a subset of WordPiece WordPiece(WordPiece), WordLevel(WordLevel), Unigram(Unigram), } impl<'de> 
Deserialize<'de> for ModelWrapper { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] pub struct Tagged { #[serde(rename = "type")] variant: EnumType, #[serde(flatten)] rest: serde_json::Value, } #[derive(Deserialize)] pub enum EnumType { BPE, WordPiece, WordLevel, Unigram, } #[derive(Deserialize)] #[serde(untagged)] pub enum ModelHelper { Tagged(Tagged), Legacy(serde_json::Value), } #[derive(Deserialize)] #[serde(untagged)] pub enum ModelUntagged { BPE(BPE), // WordPiece must stay before WordLevel here for deserialization (for retrocompatibility // with the versions not including the "type"), since WordLevel is a subset of WordPiece WordPiece(WordPiece), WordLevel(WordLevel), Unigram(Unigram), } let helper = ModelHelper::deserialize(deserializer)?; Ok(match helper { ModelHelper::Tagged(model) => match model.variant { EnumType::BPE => ModelWrapper::BPE( serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?, ), EnumType::WordPiece => ModelWrapper::WordPiece( serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?, ), EnumType::WordLevel => ModelWrapper::WordLevel( serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?, ), EnumType::Unigram => ModelWrapper::Unigram( serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?, ), }, ModelHelper::Legacy(value) => { let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?; match untagged { ModelUntagged::BPE(bpe) => ModelWrapper::BPE(bpe), ModelUntagged::WordPiece(bpe) => ModelWrapper::WordPiece(bpe), ModelUntagged::WordLevel(bpe) => ModelWrapper::WordLevel(bpe), ModelUntagged::Unigram(bpe) => ModelWrapper::Unigram(bpe), } } }) } } impl_enum_from!(WordLevel, ModelWrapper, WordLevel); impl_enum_from!(WordPiece, ModelWrapper, WordPiece); impl_enum_from!(BPE, ModelWrapper, BPE); impl_enum_from!(Unigram, ModelWrapper, Unigram); impl Model for ModelWrapper { type Trainer = 
TrainerWrapper; fn tokenize(&self, tokens: &str) -> Result<Vec<Token>> { match self { Self::WordLevel(t) => t.tokenize(tokens), Self::WordPiece(t) => t.tokenize(tokens), Self::BPE(t) => t.tokenize(tokens), Self::Unigram(t) => t.tokenize(tokens), } } fn token_to_id(&self, token: &str) -> Option<u32> { match self { Self::WordLevel(t) => t.token_to_id(token), Self::WordPiece(t) => t.token_to_id(token), Self::BPE(t) => t.token_to_id(token), Self::Unigram(t) => t.token_to_id(token), } } fn id_to_token(&self, id: u32) -> Option<String> { match self { Self::WordLevel(t) => t.id_to_token(id), Self::WordPiece(t) => t.id_to_token(id), Self::BPE(t) => t.id_to_token(id), Self::Unigram(t) => t.id_to_token(id), } } fn get_vocab(&self) -> HashMap<String, u32> { match self { Self::WordLevel(t) => t.get_vocab(), Self::WordPiece(t) => t.get_vocab(), Self::BPE(t) => t.get_vocab(), Self::Unigram(t) => t.get_vocab(), } } fn get_vocab_size(&self) -> usize { match self { Self::WordLevel(t) => t.get_vocab_size(), Self::WordPiece(t) => t.get_vocab_size(), Self::BPE(t) => t.get_vocab_size(), Self::Unigram(t) => t.get_vocab_size(), } } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { match self { Self::WordLevel(t) => t.save(folder, name), Self::WordPiece(t) => t.save(folder, name), Self::BPE(t) => t.save(folder, name), Self::Unigram(t) => t.save(folder, name), } } fn get_trainer(&self) -> Self::Trainer { match self { Self::WordLevel(t) => t.get_trainer().into(), Self::WordPiece(t) => t.get_trainer().into(), Self::BPE(t) => t.get_trainer().into(), Self::Unigram(t) => t.get_trainer().into(), } } } impl ModelWrapper { pub fn clear_cache(&mut self) { match self { Self::Unigram(model) => model.clear_cache(), Self::BPE(model) => model.clear_cache(), _ => (), } } pub fn resize_cache(&mut self, capacity: usize) { match self { Self::Unigram(model) => model.resize_cache(capacity), Self::BPE(model) => model.resize_cache(capacity), _ => (), } } } #[derive(Clone, Serialize, 
Deserialize)] pub enum TrainerWrapper { BpeTrainer(BpeTrainer), WordPieceTrainer(WordPieceTrainer), WordLevelTrainer(WordLevelTrainer), UnigramTrainer(UnigramTrainer), } impl Trainer for TrainerWrapper { type Model = ModelWrapper; fn should_show_progress(&self) -> bool { match self { Self::BpeTrainer(bpe) => bpe.should_show_progress(), Self::WordPieceTrainer(wpt) => wpt.should_show_progress(), Self::WordLevelTrainer(wpt) => wpt.should_show_progress(), Self::UnigramTrainer(wpt) => wpt.should_show_progress(), } } fn train(&self, model: &mut ModelWrapper) -> Result<Vec<AddedToken>> { match self { Self::BpeTrainer(t) => match model { ModelWrapper::BPE(bpe) => t.train(bpe), _ => Err("BpeTrainer can only train a BPE".into()), }, Self::WordPieceTrainer(t) => match model { ModelWrapper::WordPiece(wp) => t.train(wp), _ => Err("WordPieceTrainer can only train a WordPiece".into()), }, Self::WordLevelTrainer(t) => match model { ModelWrapper::WordLevel(wl) => t.train(wl), _ => Err("WordLevelTrainer can only train a WordLevel".into()), }, Self::UnigramTrainer(t) => match model { ModelWrapper::Unigram(u) => t.train(u), _ => Err("UnigramTrainer can only train a Unigram".into()), }, } } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { match self { Self::BpeTrainer(bpe) => bpe.feed(iterator, process), Self::WordPieceTrainer(wpt) => wpt.feed(iterator, process), Self::WordLevelTrainer(wpt) => wpt.feed(iterator, process), Self::UnigramTrainer(wpt) => wpt.feed(iterator, process), } } } impl_enum_from!(BpeTrainer, TrainerWrapper, BpeTrainer); impl_enum_from!(WordPieceTrainer, TrainerWrapper, WordPieceTrainer); impl_enum_from!(UnigramTrainer, TrainerWrapper, UnigramTrainer); impl_enum_from!(WordLevelTrainer, TrainerWrapper, WordLevelTrainer); #[cfg(test)] mod tests { use super::*; use crate::models::bpe::{BpeBuilder, Vocab}; #[test] fn 
trainer_wrapper_train_model_wrapper() { let trainer = TrainerWrapper::BpeTrainer(BpeTrainer::default()); let mut model = ModelWrapper::Unigram(Unigram::default()); let result = trainer.train(&mut model); assert!(result.is_err()); } #[test] fn incomplete_ordered_vocab() { let vocab_r: AHashMap<u32, String> = AHashMap::from([(0, "Hi".to_string()), (2, "There".to_string())]); let ordered = OrderedVocabIter::new(&vocab_r); let serialized = serde_json::to_string(&ordered).unwrap(); assert_eq!(serialized, "{\"Hi\":0,\"There\":2}"); } #[test] fn serialization() { let vocab: Vocab = [ ("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2), ("ab".into(), 3), ] .iter() .cloned() .collect(); let bpe = BpeBuilder::default() .vocab_and_merges(vocab, vec![("a".to_string(), "b".to_string())]) .unk_token("<unk>".to_string()) .ignore_merges(true) .build() .unwrap(); let model = ModelWrapper::BPE(bpe); let legacy = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b"]}"#; let legacy = serde_json::from_str(legacy).unwrap(); assert_eq!(model, legacy); let data = serde_json::to_string(&model).unwrap(); assert_eq!( data, r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":[["a","b"]]}"# ); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); // Legacy check, type is not necessary. 
let legacy = r#"{"dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b"]}"#; let reconstructed = serde_json::from_str(legacy).unwrap(); assert_eq!(model, reconstructed); let invalid = r#"{"type":"BPE","dropout":null,"unk_token":"<unk>","continuing_subword_prefix":null,"end_of_word_suffix":null,"fuse_unk":false,"byte_fallback":false,"ignore_merges":true,"vocab":{"<unk>":0,"a":1,"b":2,"ab":3},"merges":["a b c"]}"#; let reconstructed: std::result::Result<ModelWrapper, serde_json::Error> = serde_json::from_str(invalid); match reconstructed { Err(err) => assert_eq!(err.to_string(), "Merges text file invalid at line 1"), _ => panic!("Expected an error here"), } } }
tokenizers/tokenizers/src/models/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/mod.rs", "repo_id": "tokenizers", "token_count": 6335 }
347
use crate::tokenizer::{NormalizedString, Normalizer, Result}; pub use spm_precompiled::Precompiled; use std::cmp::Ordering; use unicode_segmentation::UnicodeSegmentation; fn replace(transformations: &mut Vec<(char, isize)>, old_part: &str, new_part: &str) { let old_count = old_part.chars().count() as isize; let new_count = new_part.chars().count() as isize; let diff = new_count - old_count; // If we are just replacing characters, all changes should be == 0 transformations.extend(new_part.chars().map(|c| (c, 0))); match diff.cmp(&0) { // If we are adding some characters, the last DIFF characters should be == 1 Ordering::Greater => { transformations .iter_mut() .rev() .take(diff as usize) .for_each(|(_, cs)| *cs = 1); } // If we are removing some characters, the last one should include the diff Ordering::Less => { if let Some((_, cs)) = transformations.last_mut() { *cs += diff; } } _ => {} } } impl Normalizer for Precompiled { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { let mut transformations = Vec::with_capacity(normalized.get().len()); // Future reader. From @Narsil. // Yes, this is weird, // Yes, this seems broken // No, I don't know why Google did this. // If you question this code, check this normalizer against // XNLI database (all languages) with Unigram model against // Mbart, XLMRoberta *AND* Marian. If you don't get 100% or // break a single test. // You don't pass. 
let mut modified = false; normalized.get().graphemes(true).for_each(|grapheme| { if grapheme.len() < 6 { if let Some(norm) = self.transform(grapheme) { modified = true; replace(&mut transformations, grapheme, norm); return; } } for (char_index, c) in grapheme.char_indices() { let part = &grapheme[char_index..char_index + c.len_utf8()]; if let Some(norm) = self.transform(part) { modified = true; replace(&mut transformations, part, norm); } else { transformations.push((c, 0)); } } }); if modified { normalized.transform(transformations, 0); } Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn expansion_followed_by_removal() { // Simulate transformations from "โ„ข\x1eg" to "TMg" let mut transformations = vec![]; let mut n = NormalizedString::from("โ„ข\x1eg"); replace(&mut transformations, "โ„ข", "TM"); replace(&mut transformations, "\x1e", ""); transformations.push(('g', 0)); n.transform(transformations, 0); assert_eq!(n.get(), "TMg"); } }
tokenizers/tokenizers/src/normalizers/precompiled.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/precompiled.rs", "repo_id": "tokenizers", "token_count": 1431 }
348
mod pre_tokenizer; mod scripts; // Re-export the PreTokenizer pub use pre_tokenizer::UnicodeScripts;
tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs", "repo_id": "tokenizers", "token_count": 35 }
349
use ahash::AHashMap; use std::borrow::Borrow; use std::hash::Hash; use std::sync::RwLock; /// The default capacity for a `BPE`'s internal cache. pub static DEFAULT_CACHE_CAPACITY: usize = 10_000; /// The maximum length we should cache in a model /// Strings that are too long have minimal chances to cache hit anyway pub static MAX_LENGTH: usize = 256; /// Provides a simple multithread cache to speed up BPE tokenization that will try to read values /// concurrently but won't block if another thread is writing. /// The goal is clearly not the accuracy of the content, both get and set /// are not guaranteed to actually get or set. #[derive(Debug)] pub(crate) struct Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { map: RwLock<AHashMap<K, V>>, pub capacity: usize, } // We dont really care about Cache comparison, so let's make them always equal impl<K, V> PartialEq for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn eq(&self, _other: &Cache<K, V>) -> bool { true } } impl<K, V> Default for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn default() -> Self { Self::new(DEFAULT_CACHE_CAPACITY) } } impl<K, V> Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { /// Create new `Cache` with the given capacity. pub(crate) fn new(capacity: usize) -> Self { let map = RwLock::new(AHashMap::with_capacity(capacity)); Cache { map, capacity } } /// Create a fresh `Cache` with the same configuration. pub(crate) fn fresh(&self) -> Self { Self::new(self.capacity) } /// Clear the cache. 
pub(crate) fn clear(&self) { self.map.write().unwrap().clear(); } #[allow(dead_code)] pub(crate) fn get_values<'a, I, Q>(&self, keys_iter: I) -> Option<Vec<Option<V>>> where I: Iterator<Item = &'a Q>, K: Borrow<Q>, Q: Hash + Eq + ?Sized + 'a, { if let Ok(ref mut cache) = self.map.try_read() { Some(keys_iter.map(|k| cache.get(k).cloned()).collect()) } else { None } } pub(crate) fn get<Q>(&self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { if let Ok(ref mut cache) = self.map.try_read() { cache.get(key).cloned() } else { None } } pub(crate) fn set_values<I>(&self, entries: I) where I: IntoIterator<Item = (K, V)>, { // Before trying to acquire a write lock, we check if we are already at // capacity with a read handler. if let Ok(cache) = self.map.try_read() { if cache.len() >= self.capacity { // At capacity, so do nothing. return; } } else { // If we couldn't acquire a read handle then we probably won't be able to acquire // a write handle one quadrillionth of a second later. return; } // Not at capacity, so try acquiring a write handle. if let Ok(mut cache) = self.map.try_write() { let free = self.capacity - cache.len(); cache.extend(entries.into_iter().take(free)); } } pub(crate) fn set(&self, key: K, value: V) { self.set_values(std::iter::once((key, value))) } pub(crate) fn resize(&mut self, capacity: usize) { self.capacity = capacity; if let Ok(mut cache) = self.map.try_write() { cache.shrink_to(capacity); } } }
tokenizers/tokenizers/src/utils/cache.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/cache.rs", "repo_id": "tokenizers", "token_count": 1571 }
350
use tokenizers::{ normalizers, pre_tokenizers::split::{Split, SplitPattern}, AddedToken, NormalizerWrapper, PreTokenizerWrapper, SplitDelimiterBehavior, Tokenizer, }; #[test] fn test_decoding_with_added_bpe() { let mut tokenizer = Tokenizer::from_file("data/llama-3-tokenizer.json").unwrap(); tokenizer.with_normalizer(Some(NormalizerWrapper::from(normalizers::ByteLevel::new()))); tokenizer.with_pre_tokenizer(Some(PreTokenizerWrapper::Split( Split::new( SplitPattern::Regex(r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+".into()), SplitDelimiterBehavior::Isolated, false, ) .unwrap(), ))); tokenizer.add_tokens(&[AddedToken::from("ๅ—Ž", false).normalized(false)]); let encoded = tokenizer .encode("Hey! how is this token: ๅ—Ž", false) .unwrap(); assert_eq!( encoded.get_ids(), [19182, 0, 1268, 602, 82, 62428, 82, 4037, 25, 220, 128256] ); assert_eq!( encoded.get_tokens(), ["Hey", "!", "ฤ how", "ฤ i", "s", "ฤ thi", "s", "ฤ token", ":", "ฤ ", "ๅ—Ž"] ); let decoded = tokenizer.decode(encoded.get_ids(), false); assert_eq!(decoded.unwrap(), "Hey! how is this token: ๅ—Ž"); tokenizer.add_tokens(&[AddedToken::from("ะด", false).normalized(true)]); let encoded = tokenizer .encode("Hey! how is this token: ะด", false) .unwrap(); assert_eq!( encoded.get_ids(), [19182, 0, 1268, 602, 82, 62428, 82, 4037, 25, 220, 128257] ); assert_eq!( encoded.get_tokens(), ["Hey", "!", "ฤ how", "ฤ i", "s", "ฤ thi", "s", "ฤ token", ":", "ฤ ", "รยด"] ); let decoded = tokenizer.decode(encoded.get_ids(), false); assert_eq!(decoded.unwrap(), "Hey! 
how is this token: ะด") } #[test] fn test_decode_stream_step_no_panic() { let tokenizer = Tokenizer::from_file("data/llama-3-tokenizer.json").unwrap(); // "A B C D E F G H I J" let mut decode_stream = tokenizer.decode_stream(false); assert_eq!(decode_stream.step(32).unwrap(), Some("A".to_string())); assert_eq!(decode_stream.step(426).unwrap(), Some(" B".to_string())); assert_eq!(decode_stream.step(356).unwrap(), Some(" C".to_string())); assert_eq!(decode_stream.step(423).unwrap(), Some(" D".to_string())); assert_eq!(decode_stream.step(469).unwrap(), Some(" E".to_string())); assert_eq!(decode_stream.step(435).unwrap(), Some(" F".to_string())); assert_eq!(decode_stream.step(480).unwrap(), Some(" G".to_string())); assert_eq!(decode_stream.step(473).unwrap(), Some(" H".to_string())); assert_eq!(decode_stream.step(358).unwrap(), Some(" I".to_string())); assert_eq!(decode_stream.step(622).unwrap(), Some(" J".to_string())); // for (i, &token) in output_tokens.iter().enumerate() {} // "์‚ฅ๋ฝ•๋นต" (Korean words composed of 2-3 tokens: [80690, 98], [167, 121, 243], and [102457, 113]) let mut decode_stream = tokenizer.decode_stream(false); assert_eq!(decode_stream.step(80690).unwrap(), None); assert_eq!(decode_stream.step(98).unwrap(), Some("์‚ฅ".to_string())); assert_eq!(decode_stream.step(167).unwrap(), None); assert_eq!(decode_stream.step(121).unwrap(), None); assert_eq!(decode_stream.step(243).unwrap(), Some("๋ฝ•".to_string())); assert_eq!(decode_stream.step(102457).unwrap(), None); assert_eq!(decode_stream.step(113).unwrap(), Some("๋นต".to_string())); }
tokenizers/tokenizers/tests/stream.rs/0
{ "file_path": "tokenizers/tokenizers/tests/stream.rs", "repo_id": "tokenizers", "token_count": 1591 }
351
# Server-side Audio Processing in Node.js A major benefit of writing code for the web is that you can access the multitude of APIs that are available in modern browsers. Unfortunately, when writing server-side code, we are not afforded such luxury, so we have to find another way. In this tutorial, we will design a simple Node.js application that uses Transformers.js for speech recognition with [Whisper](https://huggingface.co/Xenova/whisper-tiny.en), and in the process, learn how to process audio on the server. The main problem we need to solve is that the [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) is not available in Node.js, meaning we can't use the [`AudioContext`](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext) class to process audio. So, we will need to install third-party libraries to obtain the raw audio data. For this example, we will only consider `.wav` files, but the same principles apply to other audio formats. <Tip> This tutorial will be written as an ES module, but you can easily adapt it to use CommonJS instead. For more information, see the [node tutorial](https://huggingface.co/docs/transformers.js/tutorials/node). </Tip> **Useful links:** - [Source code](https://github.com/huggingface/transformers.js/tree/main/examples/node-audio-processing) - [Documentation](https://huggingface.co/docs/transformers.js) ## Prerequisites - [Node.js](https://nodejs.org/en/) version 18+ - [npm](https://www.npmjs.com/) version 9+ ## Getting started Let's start by creating a new Node.js project and installing Transformers.js via [NPM](https://www.npmjs.com/package/@huggingface/transformers): ```bash npm init -y npm i @huggingface/transformers ``` <Tip> Remember to add `"type": "module"` to your `package.json` to indicate that your project uses ECMAScript modules. 
</Tip> Next, let's install the [`wavefile`](https://www.npmjs.com/package/wavefile) package, which we will use for loading `.wav` files: ```bash npm i wavefile ``` ## Creating the application Start by creating a new file called `index.js`, which will be the entry point for our application. Let's also import the necessary modules: ```js import { pipeline } from '@huggingface/transformers'; import wavefile from 'wavefile'; ``` For this tutorial, we will use the `Xenova/whisper-tiny.en` model, but feel free to choose one of the other whisper models from the [Hugging Face Hub](https://huggingface.co/models?library=transformers.js&search=whisper). Let's create our pipeline with: ```js let transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); ``` Next, let's load an audio file and convert it to the format required by Transformers.js: ```js // Load audio data let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; let buffer = Buffer.from(await fetch(url).then(x => x.arrayBuffer())) // Read .wav file and convert it to required format let wav = new wavefile.WaveFile(buffer); wav.toBitDepth('32f'); // Pipeline expects input as a Float32Array wav.toSampleRate(16000); // Whisper expects audio with a sampling rate of 16000 let audioData = wav.getSamples(); if (Array.isArray(audioData)) { if (audioData.length > 1) { const SCALING_FACTOR = Math.sqrt(2); // Merge channels (into first channel to save memory) for (let i = 0; i < audioData[0].length; ++i) { audioData[0][i] = SCALING_FACTOR * (audioData[0][i] + audioData[1][i]) / 2; } } // Select first channel audioData = audioData[0]; } ``` Finally, let's run the model and measure execution duration. ```js let start = performance.now(); let output = await transcriber(audioData); let end = performance.now(); console.log(`Execution duration: ${(end - start) / 1000} seconds`); console.log(output); ``` You can now run the application with `node index.js`. 
Note that when running the script for the first time, it may take a while to download and cache the model. Subsequent requests will use the cached model, and model loading will be much faster. You should see output similar to: ``` Execution duration: 0.6460317999720574 seconds { text: ' And so my fellow Americans ask not what your country can do for you. Ask what you can do for your country.' } ``` That's it! You've successfully created a Node.js application that uses Transformers.js for speech recognition with Whisper. You can now use this as a starting point for your own applications.
transformers.js/docs/source/guides/node-audio-processing.md/0
{ "file_path": "transformers.js/docs/source/guides/node-audio-processing.md", "repo_id": "transformers.js", "token_count": 1352 }
352
import { useState, useRef, useEffect } from "react"; import Editor from "@monaco-editor/react"; import Progress from './components/Progress'; import './App.css' const MODELS = [ 'Xenova/tiny_starcoder_py', 'Xenova/codegen-350M-mono', // 'Xenova/starcoderbase-1b', ] function App() { // Editor setup const monaco = useRef(null); const [monacoReady, setMonacoReady] = useState(false); const [language, setLanguage] = useState('python'); // Only allow python for now // Model loading const [ready, setReady] = useState(null); const [disabled, setDisabled] = useState(false); const [progressItems, setProgressItems] = useState([]); // Inputs and outputs const [model, setModel] = useState(MODELS[0]); const [maxNewTokens, setMaxNewTokens] = useState(45); const [code, setCode] = useState('\ndef fib(n):\n """Calculates the nth Fibonacci number"""\n'); // Generation parameters const [temperature, setTemperature] = useState(0.5); const [topK, setTopK] = useState(5); const [doSample, setDoSample] = useState(false); // Create a reference to the worker object. const worker = useRef(null); // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted. useEffect(() => { if (!worker.current) { // Create the worker if it does not yet exist. worker.current = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' }); } // Create a callback function for messages from the worker thread. const onMessageReceived = (e) => { switch (e.data.status) { case 'initiate': // Model file start load: add a new progress item to the list. setReady(false); setProgressItems(prev => [...prev, e.data]); break; case 'progress': // Model file progress: update one of the progress items. setProgressItems( prev => prev.map(item => { if (item.file === e.data.file) { return { ...item, ...e.data } } return item; }) ); break; case 'done': // Model file loaded: remove the progress item from the list. 
setProgressItems( prev => prev.filter(item => item.file !== e.data.file) ); break; case 'ready': // Pipeline ready: the worker is ready to accept messages. setReady(true); break; case 'update': // Generation update: update the output text. setCode(e.data.output); break; case 'complete': // Generation complete: re-enable the "Generate" button setDisabled(false); break; } }; // Attach the callback function as an event listener. worker.current.addEventListener('message', onMessageReceived); // Define a cleanup function for when the component is unmounted. return () => worker.current.removeEventListener('message', onMessageReceived); }); useEffect(() => { const m = monaco.current; if (!m) return; let actionRegistration = m.addAction({ id: "generate", label: "Generate", contextMenuGroupId: "0_custom", run: () => { const val = m.getValue(); if (!val) return; worker.current.postMessage({ model, text: val, max_new_tokens: maxNewTokens, temperature, top_k: topK, do_sample: doSample }); } }); // Define a cleanup function for when the component is unmounted. return () => actionRegistration.dispose(); }, [monacoReady, model, maxNewTokens, temperature, topK, doSample]); const showLoading = ready === false || progressItems.length > 0 return ( <div className="flex h-screen w-screen"> <div className="gap-1 z-50 top-0 left-0 absolute w-full h-full transition-all px-32 flex flex-col justify-center text-center" style={{ opacity: showLoading ? 1 : 0, pointerEvents: showLoading ? 
'all' : 'none', background: 'rgba(0, 0, 0, 0.5)', backdropFilter: 'blur(4px)', }}> {ready === false && ( <label className="text-3xl p-3">Loading model...</label> )} {progressItems.map(data => ( <div key={data.file}> <Progress data={data} /> </div> ))} </div> <div> <Editor width="calc(100vw - 360px)" language={language} value={code} theme="vs-dark" onMount={m => { monaco.current = m; setMonacoReady(true); }} options={{ fontSize: 24 }} /> </div> <div className="flex-grow sidebar p-4 flex flex-col overflow-y-auto"> <h2 className="text-2xl font-semibold text-center mb-1">In-browser code completion</h2> <div className="text-center"> Made with&nbsp;<a className="text-white ital underline" href="https://github.com/huggingface/transformers.js">๐Ÿค— Transformers.js</a> </div> <label className="mt-3">Model:</label> <select value={model} onChange={e => setModel(e.target.value)} className="p-2.5 bg-gray-50 border border-gray-200 text-gray-900 rounded-lg"> {MODELS.map((value, i) => { return <option key={i} value={value}>{value}</option> })} </select> <div className="mt-3 flex justify-between"> <label>Max new tokens:</label> <label>{maxNewTokens}</label> </div> <input type="range" min="1" max="512" value={maxNewTokens} onChange={(event) => { const newValue = parseInt(event.target.value); setMaxNewTokens(newValue); }} /> <div className="mt-3 flex justify-between"> <label>Temperature:</label> <label>{temperature}</label> </div> <input type="range" min="0" step="0.05" max="1" value={temperature} onChange={(event) => { const newValue = Number(event.target.value); setTemperature(newValue); }} /> <div className="mt-3 flex items-center"> <input id="default-checkbox" type="checkbox" value={doSample} onInput={(event) => { setDoSample(event.target.checked); }} className="w-4 h-4 text-blue-600 rounded focus:ring-blue-600 ring-offset-gray-800 focus:ring-2 bg-gray-700 border-gray-600" /> <label htmlFor="default-checkbox" className="ml-2 font-medium">Sample</label> </div> <div className="mt-3 
flex justify-between" style={{ opacity: doSample ? 1 : 0.2 }} > <label>Top K:</label> <label>{topK}</label> </div> <input disabled={!doSample} style={{ opacity: doSample ? 1 : 0.2 }} type="range" min="0" max="50" value={topK} onChange={(event) => { const newValue = parseInt(event.target.value); setTopK(newValue); }} /> <div className="flex-grow"></div> <div className="flex gap-2 justify-center mt-3"> <svg className="w-6 h-6 text-white" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="currentColor" viewBox="0 0 20 20"> <path fillRule="evenodd" d="M10 .333A9.911 9.911 0 0 0 6.866 19.65c.5.092.678-.215.678-.477 0-.237-.01-1.017-.014-1.845-2.757.6-3.338-1.169-3.338-1.169a2.627 2.627 0 0 0-1.1-1.451c-.9-.615.07-.6.07-.6a2.084 2.084 0 0 1 1.518 1.021 2.11 2.11 0 0 0 2.884.823c.044-.503.268-.973.63-1.325-2.2-.25-4.516-1.1-4.516-4.9A3.832 3.832 0 0 1 4.7 7.068a3.56 3.56 0 0 1 .095-2.623s.832-.266 2.726 1.016a9.409 9.409 0 0 1 4.962 0c1.89-1.282 2.717-1.016 2.717-1.016.366.83.402 1.768.1 2.623a3.827 3.827 0 0 1 1.02 2.659c0 3.807-2.319 4.644-4.525 4.889a2.366 2.366 0 0 1 .673 1.834c0 1.326-.012 2.394-.012 2.72 0 .263.18.572.681.475A9.911 9.911 0 0 0 10 .333Z" clipRule="evenodd" /> </svg> <a className="text-white font-normal underline underline-offset-1" href="https://github.com/huggingface/transformers.js/tree/main/examples/code-completion">Source code</a> </div> </div> </div> ); } export default App;
transformers.js/examples/code-completion/src/App.jsx/0
{ "file_path": "transformers.js/examples/code-completion/src/App.jsx", "repo_id": "transformers.js", "token_count": 3961 }
353
@tailwind base; @tailwind components; @tailwind utilities;
transformers.js/examples/cross-encoder/src/index.css/0
{ "file_path": "transformers.js/examples/cross-encoder/src/index.css", "repo_id": "transformers.js", "token_count": 18 }
354
import { useEffect, useState, useRef, useCallback } from 'react';

import Progress from './components/Progress';
import ImageInput from './components/ImageInput';

// Feature-detect WebGPU; the app refuses to run without it (see the fallback
// branch at the bottom of the render).
const IS_WEBGPU_AVAILABLE = !!navigator.gpu;

// Main UI for the Florence-2 WebGPU demo. All model work happens in a Web
// Worker (./worker.js); this component only tracks loading progress, collects
// the task/image/text inputs, and renders the result the worker posts back.
function App() {

  // Create a reference to the worker object.
  const worker = useRef(null);

  // Model loading and progress
  const [status, setStatus] = useState(null); // null (not loaded) | 'loading' | 'ready' | 'running'
  const [loadingMessage, setLoadingMessage] = useState('');
  const [progressItems, setProgressItems] = useState([]);

  // Inference inputs/outputs: selected task prompt, optional grounding text,
  // input image (data URL), per-task result object, and last run time in ms.
  const [task, setTask] = useState('<CAPTION>');
  const [text, setText] = useState('');
  const [image, setImage] = useState(null);
  const [result, setResult] = useState(null);
  const [time, setTime] = useState(null);

  // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted.
  useEffect(() => {
    if (!worker.current) {
      // Create the worker if it does not yet exist.
      worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
      });
    }

    // Create a callback function for messages from the worker thread.
    const onMessageReceived = (e) => {
      switch (e.data.status) {
        case 'loading':
          // Model file start load: add a new progress item to the list.
          setStatus('loading');
          setLoadingMessage(e.data.data);
          break;

        case 'initiate':
          setProgressItems(prev => [...prev, e.data]);
          break;

        case 'progress':
          // Model file progress: update one of the progress items.
          setProgressItems(
            prev => prev.map(item => {
              if (item.file === e.data.file) {
                return { ...item, ...e.data }
              }
              return item;
            })
          );
          break;

        case 'done':
          // Model file loaded: remove the progress item from the list.
          setProgressItems(
            prev => prev.filter(item => item.file !== e.data.file)
          );
          break;

        case 'ready':
          // Pipeline ready: the worker is ready to accept messages.
          setStatus('ready');
          break;

        case 'complete':
          // Inference finished: store the result and timing, go back to idle.
          setResult(e.data.result);
          setTime(e.data.time);
          setStatus('ready');
          break;
      }
    };

    // Attach the callback function as an event listener.
    worker.current.addEventListener('message', onMessageReceived);

    // Define a cleanup function for when the component is unmounted.
    return () => {
      worker.current.removeEventListener('message', onMessageReceived);
    };
  }, []);

  // Single action button: the first click loads the model; every later click
  // runs inference with the current task/text/image selection.
  const handleClick = useCallback(() => {
    if (status === null) {
      setStatus('loading');
      worker.current.postMessage({ type: 'load' });
    } else {
      setStatus('running');
      worker.current.postMessage({
        type: 'run', data: { text, url: image, task }
      });
    }
  }, [status, task, image, text]);

  return (
    IS_WEBGPU_AVAILABLE
      ? (<div className="flex flex-col h-screen mx-auto items justify-end text-gray-800 dark:text-gray-200 bg-white dark:bg-gray-900 max-w-[630px]">
        {/* Full-screen progress overlay, shown only while model files download */}
        {status === 'loading' && (
          <div className="flex justify-center items-center fixed w-screen h-screen bg-black z-10 bg-opacity-[92%] top-0 left-0">
            <div className="w-[500px]">
              <p className="text-center mb-1 text-white text-md">{loadingMessage}</p>
              {progressItems.map(({ file, progress, total }, i) => (
                <Progress key={i} text={file} percentage={progress} total={total} />
              ))}
            </div>
          </div>
        )}

        <div className="h-full overflow-auto scrollbar-thin flex justify-center items-center flex-col relative">
          <div className="flex flex-col items-center mb-1 text-center">
            <h1 className="text-6xl font-bold mb-2">Florence2 WebGPU</h1>
            <h2 className="text-xl font-semibold">Powerful vision foundation model running locally in your browser.</h2>
          </div>

          <div className="w-full min-h-[220px] flex flex-col justify-center items-center p-2">
            <p className="mb-2">
              You are about to download <a href="https://huggingface.co/onnx-community/Florence-2-base-ft" target="_blank" rel="noreferrer" className="font-medium underline">Florence-2-base-ft</a>,
              a 230 million parameter vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks like captioning, object detection, and segmentation.
              Once loaded, the model (340&nbsp;MB) will be cached and reused when you revisit the page.<br />
              <br />
              Everything runs locally in your browser using <a href="https://huggingface.co/docs/transformers.js" target="_blank" rel="noreferrer" className="underline">🤗&nbsp;Transformers.js</a> and ONNX Runtime Web,
              meaning no API calls are made to a server for inference. You can even disconnect from the internet after the model has loaded!
            </p>

            <div className="flex w-full justify-around m-4">
              {/* Left column: task selector + input image */}
              <div className="flex flex-col gap-2 w-full max-w-[48%]">
                <div className="flex flex-col">
                  <span className="text-sm mb-0.5">Task</span>
                  {/* Option values are the Florence-2 task prompts the worker passes to the model */}
                  <select
                    className="border rounded-md p-1"
                    value={task}
                    onChange={(e) => setTask(e.target.value)}
                  >
                    <option value="<CAPTION>">Caption</option>
                    <option value="<DETAILED_CAPTION>">Detailed Caption</option>
                    <option value="<MORE_DETAILED_CAPTION>">More Detailed Caption</option>
                    <option value="<OCR>">OCR</option>
                    <option value="<OCR_WITH_REGION>">OCR with Region</option>
                    <option value="<OD>">Object Detection</option>
                    <option value="<DENSE_REGION_CAPTION>">Dense Region Caption</option>
                    <option value="<CAPTION_TO_PHRASE_GROUNDING>">Caption to Phrase Grounding</option>
                    {/* <option value="<REFERRING_EXPRESSION_SEGMENTATION>">Referring Expression Segmentation</option> */}
                    {/* <option value="<REGION_TO_SEGMENTATION>">Region to Segmentation</option> */}
                    {/* <option value="<OPEN_VOCABULARY_DETECTION>">Open Vocabulary Detection</option> */}
                    {/* <option value="<REGION_TO_CATEGORY>">Region to Category</option> */}
                    {/* <option value="<REGION_TO_DESCRIPTION>">Region to Description</option> */}
                    {/* <option value="<REGION_TO_OCR>">Region to OCR</option> */}
                    {/* <option value="<REGION_PROPOSAL>">Region Proposal</option> */}
                  </select>
                </div>
                <div className="flex flex-col">
                  <span className="text-sm mb-0.5">Input Image</span>
                  <ImageInput className="flex flex-col items-center border border-gray-300 rounded-md cursor-pointer h-[250px]" onImageChange={(file, result) => {
                    worker.current.postMessage({ type: 'reset' }); // Reset image cache
                    setResult(null);
                    setImage(result);
                  }} />
                </div>
              </div>

              {/* Right column: optional text input (grounding task only) + output */}
              <div className="flex flex-col gap-2 w-full max-w-[48%] justify-end">
                {
                  task === '<CAPTION_TO_PHRASE_GROUNDING>' && (<div className="flex flex-col">
                    <span className="text-sm mb-0.5">Text input</span>
                    <input
                      className="border rounded-md px-2 py-[3.5px]"
                      value={text}
                      onChange={(e) => setText(e.target.value)}
                    />
                  </div>)
                }

                <div className="flex flex-col relative">
                  <span className="text-sm mb-0.5">Output</span>
                  <div className="flex justify-center border border-gray-300 rounded-md h-[250px]">
                    {/* Results are keyed by task prompt; strings render as text, structured results as JSON */}
                    {result?.[task] && (<>
                      {
                        typeof result[task] === 'string'
                          ? <p className="pt-4 px-4 text-center max-h-[205px] overflow-y-auto">{result[task]}</p>
                          : <pre className="w-full h-full p-2 overflow-y-auto">
                            {JSON.stringify(result[task], null, 2)}
                          </pre>
                      }
                      {
                        time && <p className="text-sm text-gray-500 absolute bottom-2 bg-white p-1 rounded border">Execution time: {time.toFixed(2)} ms</p>
                      }
                    </>)
                    }
                  </div>
                </div>
              </div>
            </div>

            {/* Disabled while running, and (once loaded) until an image is chosen */}
            <button
              className="border px-4 py-2 rounded-lg bg-blue-400 text-white hover:bg-blue-500 disabled:bg-blue-100 disabled:cursor-not-allowed select-none"
              onClick={handleClick}
              disabled={status === 'running' || (status !== null && image === null)}
            >
              {status === null ? 'Load model' :
                status === 'running'
                  ? 'Running...'
                  : 'Run model'
              }
            </button>
          </div>
        </div>
      </div>)
      : (<div className="fixed w-screen h-screen bg-black z-10 bg-opacity-[92%] text-white text-2xl font-semibold flex justify-center items-center text-center">WebGPU is not supported<br />by this browser :&#40;</div>)
  )
}

export default App
transformers.js/examples/florence2-webgpu/src/App.jsx/0
{ "file_path": "transformers.js/examples/florence2-webgpu/src/App.jsx", "repo_id": "transformers.js", "token_count": 4599 }
355
import { pipeline } from "@xenova/transformers";

// Lazily construct (and memoize) the text-classification pipeline.
// The class is produced by a factory function so that exactly the same
// definition can back both the hot-reload-safe development singleton and the
// plain production singleton below, without duplicating the code.
const P = () => class PipelineSingleton {
    static task = 'text-classification';
    static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
    static instance = null;

    // Returns the (possibly still-pending) pipeline promise, creating it on
    // the first call. `progress_callback` is only honoured on that first call.
    static async getInstance(progress_callback = null) {
        this.instance ??= pipeline(this.task, this.model, { progress_callback });
        return this.instance;
    }
}

let PipelineSingleton;
if (process.env.NODE_ENV !== 'production') {
    // In development, stash the singleton on `global` so it survives hot
    // reloads. For more information, see
    // https://vercel.com/guides/nextjs-prisma-postgres
    global.PipelineSingleton ??= P();
    PipelineSingleton = global.PipelineSingleton;
} else {
    PipelineSingleton = P();
}

export default PipelineSingleton;
transformers.js/examples/next-server/src/app/classify/pipeline.js/0
{ "file_path": "transformers.js/examples/next-server/src/app/classify/pipeline.js", "repo_id": "transformers.js", "token_count": 370 }
356
import { decode } from 'blurhash'

// Size (in pixels) of the square canvas used to rasterize blurhash previews.
const SIZE = 32;

/**
 * Decode a blurhash string into a tiny PNG data URL, suitable for use as a
 * low-resolution placeholder while the real image loads.
 *
 * @param {string} hash blurhash string (falsy values are passed through)
 * @returns {string|undefined} PNG data URL, or `undefined` if no hash given
 */
export function blurHashToDataURL(hash) {
    if (!hash) return undefined

    const pixels = decode(hash, SIZE, SIZE)

    const canvas = document.createElement("canvas");
    canvas.width = SIZE;
    canvas.height = SIZE;

    const ctx = canvas.getContext("2d");
    const imageData = ctx.createImageData(SIZE, SIZE);
    imageData.data.set(pixels);
    ctx.putImageData(imageData, 0, 0);
    return canvas.toDataURL();
}

/**
 * Trigger a browser download of `url` under the given filename by clicking a
 * temporary anchor element.
 *
 * @param {string} url data/blob URL to download
 * @param {string} filename suggested filename for the saved file
 */
function downloadData(url, filename) {
    // Create an anchor element with the data URL as the href attribute
    const downloadLink = document.createElement('a');
    downloadLink.href = url;

    // Set the download attribute to specify the desired filename for the downloaded image
    downloadLink.download = filename;

    // Trigger the download
    downloadLink.click();

    // Clean up: remove the anchor element from the DOM
    downloadLink.remove();
}

/**
 * Download an image from a (possibly cross-origin) URL and save it locally.
 * Errors (network failures and non-2xx responses) are logged to the console
 * rather than thrown.
 *
 * @param {string} url image URL to fetch
 * @param {string} filename suggested filename for the saved image
 */
export function downloadImage(url, filename) {
    fetch(url, {
        headers: new Headers({
            Origin: location.origin,
        }),
        mode: 'cors',
    })
        .then((response) => {
            // Fail early on HTTP errors instead of saving an error page as the "image".
            if (!response.ok) {
                throw new Error(`Failed to fetch image: ${response.status} ${response.statusText}`);
            }
            return response.blob();
        })
        .then((blob) => {
            const blobUrl = window.URL.createObjectURL(blob)
            downloadData(blobUrl, filename)
            // Release the object URL once the download has been triggered,
            // otherwise the blob is kept alive for the lifetime of the page.
            window.URL.revokeObjectURL(blobUrl)
        })
        .catch((e) => console.error(e))
}

// Adapted from https://github.com/xenova/transformers.js/blob/c367f9d68b809bbbf81049c808bf6d219d761d23/src/utils/hub.js#L330
/**
 * Fetch a file, storing the response bytes in the Cache Storage API
 * ('image-database' cache) so subsequent calls are served locally.
 * Cache failures are non-fatal (logged and ignored); HTTP errors throw.
 *
 * @param {string} url file URL to fetch
 * @returns {Promise<ArrayBuffer>} the file contents
 * @throws {Error} if the network request returns a non-2xx status
 */
export async function getCachedFile(url) {
    let cache;
    try {
        cache = await caches.open('image-database');
        const cachedResponse = await cache.match(url);
        if (cachedResponse) {
            return await cachedResponse.arrayBuffer();
        }
    } catch (e) {
        console.warn('Unable to open cache', e);
    }

    // No cache, or cache failed to open. Fetch the file.
    const response = await fetch(url);
    if (!response.ok) {
        // Do NOT cache (or return) error responses: a cached 404 page would be
        // served forever and e.g. fail JSON parsing in getCachedJSON.
        throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
    }
    const buffer = await response.arrayBuffer();

    if (cache) {
        try {
            // NOTE: We use `new Response(buffer, ...)` instead of `response.clone()` to handle LFS files
            await cache.put(url, new Response(buffer, {
                headers: response.headers,
            }));
        } catch (e) {
            console.warn('Unable to cache file', e);
        }
    }

    return buffer;
}

/**
 * Fetch (with caching, via getCachedFile) and parse a UTF-8 JSON file.
 *
 * @param {string} url JSON file URL
 * @returns {Promise<any>} the parsed JSON value
 */
export async function getCachedJSON(url) {
    const buffer = await getCachedFile(url);

    const decoder = new TextDecoder('utf-8');
    const jsonData = decoder.decode(buffer);

    return JSON.parse(jsonData);
}
transformers.js/examples/semantic-image-search-client/src/app/utils.js/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/src/app/utils.js", "repo_id": "transformers.js", "token_count": 1001 }
357
import { AutoTokenizer, CLIPTextModelWithProjection } from "@xenova/transformers";
import { createClient } from '@supabase/supabase-js'

// Lazily construct the shared application resources (tokenizer, CLIP text
// model, Supabase client). The class is produced by a factory function so the
// identical definition serves both the hot-reload-safe development singleton
// and the production singleton below.
const S = () => class ApplicationSingleton {
    static model_id = 'Xenova/clip-vit-base-patch16';
    static tokenizer = null;
    static text_model = null;
    static database = null;

    // Kick off loading on first use; the promises are stored on the class so
    // repeat calls reuse them. Resolves to [tokenizer, text_model, database].
    static async getInstance() {
        // Load tokenizer and text model
        this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id);

        this.text_model ??= CLIPTextModelWithProjection.from_pretrained(this.model_id, {
            quantized: false,
        });

        this.database ??= createClient(
            process.env.SUPABASE_URL,
            process.env.SUPABASE_ANON_KEY,
        );

        return Promise.all([
            this.tokenizer,
            this.text_model,
            this.database,
        ]);
    }
}

let ApplicationSingleton;
if (process.env.NODE_ENV !== 'production') {
    // In development, stash the singleton on `global` so it survives hot
    // reloads. For more information, see
    // https://vercel.com/guides/nextjs-prisma-postgres
    global.ApplicationSingleton ??= S();
    ApplicationSingleton = global.ApplicationSingleton;
} else {
    ApplicationSingleton = S();
}

export default ApplicationSingleton;
transformers.js/examples/semantic-image-search/src/app/app.js/0
{ "file_path": "transformers.js/examples/semantic-image-search/src/app/app.js", "repo_id": "transformers.js", "token_count": 678 }
358
// Real-time object detection on a webcam stream: frames are drawn to a
// canvas, passed through a YOLO-style model (gelan-c), and detections are
// rendered as absolutely-positioned divs in an overlay element.
import './style.css';

import { env, AutoModel, AutoProcessor, RawImage } from '@xenova/transformers';

// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;

// Proxy the WASM backend to prevent the UI from freezing
env.backends.onnx.wasm.proxy = true;

// Reference the elements that we will need
const status = document.getElementById('status');
const container = document.getElementById('container');
const overlay = document.getElementById('overlay');
const canvas = document.getElementById('canvas');
const video = document.getElementById('video');
const thresholdSlider = document.getElementById('threshold');
const thresholdLabel = document.getElementById('threshold-value');
const sizeSlider = document.getElementById('size');
const sizeLabel = document.getElementById('size-value');
const scaleSlider = document.getElementById('scale');
const scaleLabel = document.getElementById('scale-value');

// Resize both the video element and the processing canvas to the same
// (rounded) dimensions so frames can be copied 1:1.
function setStreamSize(width, height) {
    video.width = canvas.width = Math.round(width);
    video.height = canvas.height = Math.round(height);
}

status.textContent = 'Loading model...';

// Load model and processor
const model_id = 'Xenova/gelan-c_all';

const model = await AutoModel.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);

// Set up controls
// Scale: fraction of the native webcam resolution to process (lower = faster).
let scale = 0.5;
scaleSlider.addEventListener('input', () => {
    scale = Number(scaleSlider.value);
    setStreamSize(video.videoWidth * scale, video.videoHeight * scale);
    scaleLabel.textContent = scale;
});
scaleSlider.disabled = false;

// Threshold: minimum confidence score a detection needs to be drawn.
let threshold = 0.25;
thresholdSlider.addEventListener('input', () => {
    threshold = Number(thresholdSlider.value);
    thresholdLabel.textContent = threshold.toFixed(2);
});
thresholdSlider.disabled = false;

// Size: shortest edge (in pixels) the processor resizes frames to before
// inference; smaller is faster but less accurate.
let size = 128;
processor.feature_extractor.size = { shortest_edge: size };
sizeSlider.addEventListener('input', () => {
    size = Number(sizeSlider.value);
    processor.feature_extractor.size = { shortest_edge: size };
    sizeLabel.textContent = size;
});
sizeSlider.disabled = false;

status.textContent = 'Ready';

// Fixed palette; each class id maps to a colour deterministically below.
const COLOURS = [
    "#EF4444", "#4299E1", "#059669", "#FBBF24", "#4B52B1",
    "#7B3AC2", "#ED507A", "#1DD1A1", "#F3873A", "#4B5563",
    "#DC2626", "#1852B4", "#18A35D", "#F59E0B", "#4059BE",
    "#6027A5", "#D63D60", "#00AC9B", "#E64A19", "#272A34"
]

// Render a bounding box and label on the image.
// `[xmin, ymin, xmax, ymax, score, id]` is one model detection (pixel coords
// relative to the reshaped input); `[w, h]` is the reshaped input size used to
// convert the box to percentages of the overlay.
function renderBox([xmin, ymin, xmax, ymax, score, id], [w, h]) {
    if (score < threshold) return; // Skip boxes with low confidence

    // Pick a colour for the box (deterministic in the class id, so the same
    // class always gets the same colour)
    const color = COLOURS[id % COLOURS.length];

    // Draw the box
    const boxElement = document.createElement('div');
    boxElement.className = 'bounding-box';
    Object.assign(boxElement.style, {
        borderColor: color,
        left: 100 * xmin / w + '%',
        top: 100 * ymin / h + '%',
        width: 100 * (xmax - xmin) / w + '%',
        height: 100 * (ymax - ymin) / h + '%',
    })

    // Draw label
    const labelElement = document.createElement('span');
    labelElement.textContent = `${model.config.id2label[id]} (${(100 * score).toFixed(2)}%)`;
    labelElement.className = 'bounding-box-label';
    labelElement.style.backgroundColor = color;

    boxElement.appendChild(labelElement);
    overlay.appendChild(boxElement);
}

// Guard so only one inference runs at a time; frames that arrive while the
// model is busy are still drawn to the canvas but not processed.
let isProcessing = false;
let previousTime; // timestamp of the previous completed inference (for FPS)
const context = canvas.getContext('2d', { willReadFrequently: true });
function updateCanvas() {
    const { width, height } = canvas;
    context.drawImage(video, 0, 0, width, height);

    if (!isProcessing) {
        isProcessing = true;
        (async function () {
            // Read the current frame from the video
            const pixelData = context.getImageData(0, 0, width, height).data;
            const image = new RawImage(pixelData, width, height, 4);

            // Process the image and run the model
            const inputs = await processor(image);
            const { outputs } = await model(inputs);

            // Update UI
            overlay.innerHTML = '';

            const sizes = inputs.reshaped_input_sizes[0].reverse();
            outputs.tolist().forEach(x => renderBox(x, sizes));

            if (previousTime !== undefined) {
                const fps = 1000 / (performance.now() - previousTime);
                status.textContent = `FPS: ${fps.toFixed(2)}`;
            }
            previousTime = performance.now();
            isProcessing = false;
        })();
    }

    window.requestAnimationFrame(updateCanvas);
}

// Start the video stream
navigator.mediaDevices.getUserMedia(
    { video: true }, // Ask for video
).then((stream) => {
    // Set up the video and canvas elements.
    video.srcObject = stream;
    video.play();

    const videoTrack = stream.getVideoTracks()[0];
    const { width, height } = videoTrack.getSettings();

    setStreamSize(width * scale, height * scale);

    // Set container width and height depending on the image aspect ratio
    const ar = width / height;
    const [cw, ch] = (ar > 720 / 405) ? [720, 720 / ar] : [405 * ar, 405];
    container.style.width = `${cw}px`;
    container.style.height = `${ch}px`;

    // Start the animation loop
    window.requestAnimationFrame(updateCanvas);
}).catch((error) => {
    alert(error);
});
{ "file_path": "transformers.js/examples/video-object-detection/main.js", "repo_id": "transformers.js", "token_count": 1945 }
359
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Transformers.js | WebGPU Benchmark</title>
</head>

<body>
  <h1>
    <a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
    WebGPU Benchmark
  </h1>
  <p>
    This benchmark measures the execution time of BERT-based embedding models
    using the WASM and WebGPU execution providers across different batch sizes.
  </p>
  <!-- Results chart (drawn by main.js via the canvas 2D/Chart API) -->
  <div id="chart-container">
    <canvas id="chart"></canvas>
  </div>
  <div>
    <!-- Buttons start disabled until main.js has finished initializing -->
    <button id="start" disabled>Start Benchmark</button>
    <button id="stop" disabled>Stop Benchmark</button>
  </div>
  <label id="status"></label>
  <details open>
    <summary>Options</summary>
    <!-- Each ".tests" checkbox selects one (execution provider, dtype) configuration.
         data-color is the RGB triple used for that series in the chart;
         data-device / data-dtype are read by main.js to configure the pipeline. -->
    <div>
      <input class="tests" type="checkbox" value="WASM (int8)" data-color="33,150,243" data-device="wasm"
        data-dtype="int8"> WASM (int8)<br />
      <input class="tests" type="checkbox" value="WASM (fp16)" data-color="63,81,181" data-device="wasm"
        data-dtype="fp16"> WASM (fp16)<br />
      <input class="tests" type="checkbox" value="WASM (fp32)" data-color="46,204,113" data-device="wasm"
        data-dtype="fp32" checked> WASM (fp32)<br />
      <!-- <input class="tests" type="checkbox" value="WebGPU (int8)" data-color="233,30,99" data-device="webgpu"
        data-dtype="int8"> WebGPU (int8)<br /> -->
      <input class="tests" type="checkbox" value="WebGPU (fp16)" data-color="255,193,7" data-device="webgpu"
        data-dtype="fp16"> WebGPU (fp16)<br />
      <input class="tests" type="checkbox" value="WebGPU (fp32)" data-color="0,150,136" data-device="webgpu"
        data-dtype="fp32" checked> WebGPU (fp32)<br />
    </div>
    <hr />
    <!-- Benchmark parameters consumed by main.js -->
    <div>
      <label>Model ID</label>
      <input id="model-id" value="Xenova/all-MiniLM-L6-v2" />
    </div>
    <div>
      <label>Batch sizes</label>
      <input id="batch-sizes" value="1, 2, 4, 8, 16, 32" />
    </div>
    <div>
      <label>Sequence length</label>
      <input id="sequence-length" type="number" min="1" max="512" value="512" />
    </div>
    <hr />
    <!-- Chart axis-scale toggles -->
    <div>
      <input id="x-scale" type="checkbox" /> Log scale (x)
      <br />
      <input id="y-scale" type="checkbox" /> Log scale (y)
      <br />
    </div>
  </details>

  <script type="module" src="/main.js"></script>
</body>

</html>
transformers.js/examples/webgpu-embedding-benchmark/index.html/0
{ "file_path": "transformers.js/examples/webgpu-embedding-benchmark/index.html", "repo_id": "transformers.js", "token_count": 1004 }
360
// Outline of the icon's artwork (mountain/photo glyph with a small sun),
// expressed as a single SVG path in a 24x24 viewBox.
const IMAGE_ICON_PATH =
  "m2.25 15.75 5.159-5.159a2.25 2.25 0 0 1 3.182 0l5.159 5.159m-1.5-1.5 1.409-1.409a2.25 2.25 0 0 1 3.182 0l2.909 2.909m-18 3.75h16.5a1.5 1.5 0 0 0 1.5-1.5V6a1.5 1.5 0 0 0-1.5-1.5H3.75A1.5 1.5 0 0 0 2.25 6v12a1.5 1.5 0 0 0 1.5 1.5Zm10.5-11.25h.008v.008h-.008V8.25Zm.375 0a.375.375 0 1 1-.75 0 .375.375 0 0 1 .75 0Z";

// 24x24 stroked "image" icon. Any received props are spread onto the root
// <svg> first, so the explicit presentation attributes below always win.
export default function ImageIcon(props) {
  return (
    <svg
      {...props}
      xmlns="http://www.w3.org/2000/svg"
      width="24"
      height="24"
      viewBox="0 0 24 24"
      fill="none"
      stroke="currentColor"
      strokeWidth="2"
      strokeLinecap="round"
      strokeLinejoin="round"
    >
      <path d={IMAGE_ICON_PATH} />
    </svg>
  );
}
transformers.js/examples/webgpu-vlm/src/components/icons/ImageIcon.jsx/0
{ "file_path": "transformers.js/examples/webgpu-vlm/src/components/icons/ImageIcon.jsx", "repo_id": "transformers.js", "token_count": 462 }
361
import { useEffect, useState, useRef } from 'react';

import { AudioVisualizer } from './components/AudioVisualizer';
import Progress from './components/Progress';
import { LanguageSelector } from './components/LanguageSelector';

// Feature-detect WebGPU; a fallback message is rendered without it.
const IS_WEBGPU_AVAILABLE = !!navigator.gpu;

// Whisper expects 16 kHz mono audio; we keep at most the last 30 seconds.
const WHISPER_SAMPLING_RATE = 16_000;
const MAX_AUDIO_LENGTH = 30; // seconds
const MAX_SAMPLES = WHISPER_SAMPLING_RATE * MAX_AUDIO_LENGTH;

// Real-time speech-recognition UI. A MediaRecorder feeds audio chunks into
// React state; whenever the worker (./worker.js) is idle, the accumulated
// audio is decoded and posted to it for transcription.
function App() {

  // Create a reference to the worker object.
  const worker = useRef(null);

  // MediaRecorder wrapping the microphone stream (set up in the second effect).
  const recorderRef = useRef(null);

  // Model loading and progress
  const [status, setStatus] = useState(null); // null (not loaded) | 'loading' | 'ready'
  const [loadingMessage, setLoadingMessage] = useState('');
  const [progressItems, setProgressItems] = useState([]);

  // Inputs and outputs
  const [text, setText] = useState('');      // latest transcription
  const [tps, setTps] = useState(null);      // generation speed (tokens/sec)
  const [language, setLanguage] = useState('en');

  // Processing
  const [recording, setRecording] = useState(false);
  const [isProcessing, setIsProcessing] = useState(false); // worker busy flag
  const [chunks, setChunks] = useState([]);  // pending MediaRecorder Blobs
  const [stream, setStream] = useState(null);
  const audioContextRef = useRef(null);      // used to decode chunks at 16 kHz

  // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted.
  useEffect(() => {
    if (!worker.current) {
      // Create the worker if it does not yet exist.
      worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
      });
    }

    // Create a callback function for messages from the worker thread.
    const onMessageReceived = (e) => {
      switch (e.data.status) {
        case 'loading':
          // Model file start load: add a new progress item to the list.
          setStatus('loading');
          setLoadingMessage(e.data.data);
          break;

        case 'initiate':
          setProgressItems(prev => [...prev, e.data]);
          break;

        case 'progress':
          // Model file progress: update one of the progress items.
          setProgressItems(
            prev => prev.map(item => {
              if (item.file === e.data.file) {
                return { ...item, ...e.data }
              }
              return item;
            })
          );
          break;

        case 'done':
          // Model file loaded: remove the progress item from the list.
          setProgressItems(
            prev => prev.filter(item => item.file !== e.data.file)
          );
          break;

        case 'ready':
          // Pipeline ready: the worker is ready to accept messages, so begin recording.
          setStatus('ready');
          recorderRef.current?.start();
          break;

        case 'start': {
          // Start generation
          setIsProcessing(true);

          // Request new data from the recorder
          recorderRef.current?.requestData();
        }
          break;

        case 'update': {
          // Generation update: update the output text.
          const { tps } = e.data;
          setTps(tps);
        }
          break;

        case 'complete':
          // Generation complete: re-enable the "Generate" button
          setIsProcessing(false);
          setText(e.data.output);
          break;
      }
    };

    // Attach the callback function as an event listener.
    worker.current.addEventListener('message', onMessageReceived);

    // Define a cleanup function for when the component is unmounted.
    return () => {
      worker.current.removeEventListener('message', onMessageReceived);
    };
  }, []);

  // Acquire the microphone once on mount and wire up the MediaRecorder that
  // feeds audio chunks into state.
  useEffect(() => {
    if (recorderRef.current) return; // Already set

    if (navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices.getUserMedia({ audio: true })
        .then(stream => {
          setStream(stream);

          recorderRef.current = new MediaRecorder(stream);
          audioContextRef.current = new AudioContext({ sampleRate: WHISPER_SAMPLING_RATE });

          recorderRef.current.onstart = () => {
            setRecording(true);
            setChunks([]);
          }
          recorderRef.current.ondataavailable = (e) => {
            if (e.data.size > 0) {
              setChunks((prev) => [...prev, e.data]);
            } else {
              // Empty chunk received, so we request new data after a short timeout
              setTimeout(() => {
                recorderRef.current.requestData();
              }, 25);
            }
          };

          recorderRef.current.onstop = () => {
            setRecording(false);
          };

        })
        .catch(err => console.error("The following error occurred: ", err));
    } else {
      console.error("getUserMedia not supported on your browser!");
    }

    return () => {
      recorderRef.current?.stop();
      recorderRef.current = null;
    };
  }, []);

  // Whenever new chunks arrive (and the worker is idle), decode the
  // accumulated audio, keep only the trailing MAX_SAMPLES samples, and send
  // them to the worker for transcription.
  useEffect(() => {
    if (!recorderRef.current) return;
    if (!recording) return;
    if (isProcessing) return;
    if (status !== 'ready') return;

    if (chunks.length > 0) {
      // Generate from data
      const blob = new Blob(chunks, { type: recorderRef.current.mimeType });

      const fileReader = new FileReader();

      fileReader.onloadend = async () => {
        const arrayBuffer = fileReader.result;
        const decoded = await audioContextRef.current.decodeAudioData(arrayBuffer);
        let audio = decoded.getChannelData(0);
        if (audio.length > MAX_SAMPLES) { // Get last MAX_SAMPLES
          audio = audio.slice(-MAX_SAMPLES);
        }

        worker.current.postMessage({ type: 'generate', data: { audio, language } });
      }
      fileReader.readAsArrayBuffer(blob);
    } else {
      recorderRef.current?.requestData();
    }
  }, [status, recording, isProcessing, chunks, language]);

  return (
    IS_WEBGPU_AVAILABLE
      ? (<div className="flex flex-col h-screen mx-auto justify-end text-gray-800 dark:text-gray-200 bg-white dark:bg-gray-900">

        {(
          <div className="h-full overflow-auto scrollbar-thin flex justify-center items-center flex-col relative">
            <div className="flex flex-col items-center mb-1 max-w-[400px] text-center">
              <img src="logo.png" width="50%" height="auto" className="block"></img>
              <h1 className="text-4xl font-bold mb-1">Whisper WebGPU</h1>
              <h2 className="text-xl font-semibold">Real-time in-browser speech recognition</h2>
            </div>

            <div className="flex flex-col items-center px-4">
              {/* Intro + load button, shown until the first load is started */}
              {status === null && (<>
                <p className="max-w-[480px] mb-4">
                  <br />
                  You are about to load <a href="https://huggingface.co/onnx-community/whisper-base" target="_blank" rel="noreferrer" className="font-medium underline">whisper-base</a>,
                  a 73 million parameter speech recognition model that is optimized for inference on the web. Once downloaded, the model (~200&nbsp;MB) will be cached and reused when you revisit the page.<br />
                  <br />
                  Everything runs directly in your browser using <a href="https://huggingface.co/docs/transformers.js" target="_blank" rel="noreferrer" className="underline">🤗&nbsp;Transformers.js</a> and ONNX Runtime Web,
                  meaning no data is sent to a server. You can even disconnect from the internet after the model has loaded!
                </p>

                <button
                  className="border px-4 py-2 rounded-lg bg-blue-400 text-white hover:bg-blue-500 disabled:bg-blue-100 disabled:cursor-not-allowed select-none"
                  onClick={() => {
                    worker.current.postMessage({ type: 'load' });
                    setStatus('loading');
                  }}
                  disabled={status !== null}
                >
                  Load model
                </button>
              </>)}

              <div className="w-[500px] p-2">
                <AudioVisualizer className="w-full rounded-lg" stream={stream} />
                {status === 'ready' && <div className="relative">
                  <p className="w-full h-[80px] overflow-y-auto overflow-wrap-anywhere border rounded-lg p-2">{text}</p>
                  {tps && <span className="absolute bottom-0 right-0 px-1">{tps.toFixed(2)} tok/s</span>}
                </div>}
              </div>
              {/* Language picker + reset; restarting the recorder clears buffered audio */}
              {status === 'ready' && <div className='relative w-full flex justify-center'>
                <LanguageSelector language={language} setLanguage={(e) => {
                  recorderRef.current?.stop();
                  setLanguage(e);
                  recorderRef.current?.start();
                }} />
                <button className="border rounded-lg px-2 absolute right-2" onClick={() => {
                  recorderRef.current?.stop();
                  recorderRef.current?.start();
                }}>Reset</button>
              </div>
              }
              {status === 'loading' && (
                <div className="w-full max-w-[500px] text-left mx-auto p-4">
                  <p className="text-center">{loadingMessage}</p>
                  {progressItems.map(({ file, progress, total }, i) => (
                    <Progress key={i} text={file} percentage={progress} total={total} />
                  ))}
                </div>
              )}
            </div>
          </div>
        )}
      </div>)
      : (<div className="fixed w-screen h-screen bg-black z-10 bg-opacity-[92%] text-white text-2xl font-semibold flex justify-center items-center text-center">WebGPU is not supported<br />by this browser :&#40;</div>)
  )
}

export default App
{ "file_path": "transformers.js/examples/webgpu-whisper/src/App.jsx", "repo_id": "transformers.js", "token_count": 4256 }
362
/**
 * Convert a string to title case: each maximal run of word characters gets an
 * uppercase first letter, and the single character following each run (the
 * separator, e.g. a space or '/') is preserved by the regex capture itself.
 * Leading non-word characters, if any, are dropped.
 */
function titleCase(str) {
    const tokens = str.toLowerCase().match(/\w+.?/g) ?? [];
    let result = "";
    for (const token of tokens) {
        result += token.charAt(0).toUpperCase() + token.slice(1);
    }
    return result;
}

// Whisper-supported languages, keyed by ISO-639 code (lowercase names).
// List of supported languages:
// https://help.openai.com/en/articles/7031512-whisper-api-faq
// https://github.com/openai/whisper/blob/248b6cb124225dd263bb9bd32d060b6517e067f8/whisper/tokenizer.py#L79
const LANGUAGES = {
    en: "english",
    zh: "chinese",
    de: "german",
    es: "spanish/castilian",
    ru: "russian",
    ko: "korean",
    fr: "french",
    ja: "japanese",
    pt: "portuguese",
    tr: "turkish",
    pl: "polish",
    ca: "catalan/valencian",
    nl: "dutch/flemish",
    ar: "arabic",
    sv: "swedish",
    it: "italian",
    id: "indonesian",
    hi: "hindi",
    fi: "finnish",
    vi: "vietnamese",
    he: "hebrew",
    uk: "ukrainian",
    el: "greek",
    ms: "malay",
    cs: "czech",
    ro: "romanian/moldavian/moldovan",
    da: "danish",
    hu: "hungarian",
    ta: "tamil",
    no: "norwegian",
    th: "thai",
    ur: "urdu",
    hr: "croatian",
    bg: "bulgarian",
    lt: "lithuanian",
    la: "latin",
    mi: "maori",
    ml: "malayalam",
    cy: "welsh",
    sk: "slovak",
    te: "telugu",
    fa: "persian",
    lv: "latvian",
    bn: "bengali",
    sr: "serbian",
    az: "azerbaijani",
    sl: "slovenian",
    kn: "kannada",
    et: "estonian",
    mk: "macedonian",
    br: "breton",
    eu: "basque",
    is: "icelandic",
    hy: "armenian",
    ne: "nepali",
    mn: "mongolian",
    bs: "bosnian",
    kk: "kazakh",
    sq: "albanian",
    sw: "swahili",
    gl: "galician",
    mr: "marathi",
    pa: "punjabi/panjabi",
    si: "sinhala/sinhalese",
    km: "khmer",
    sn: "shona",
    yo: "yoruba",
    so: "somali",
    af: "afrikaans",
    oc: "occitan",
    ka: "georgian",
    be: "belarusian",
    tg: "tajik",
    sd: "sindhi",
    gu: "gujarati",
    am: "amharic",
    yi: "yiddish",
    lo: "lao",
    uz: "uzbek",
    fo: "faroese",
    ht: "haitian creole/haitian",
    ps: "pashto/pushto",
    tk: "turkmen",
    nn: "nynorsk",
    mt: "maltese",
    sa: "sanskrit",
    lb: "luxembourgish/letzeburgesch",
    my: "myanmar/burmese",
    bo: "tibetan",
    tl: "tagalog",
    mg: "malagasy",
    as: "assamese",
    tt: "tatar",
    haw: "hawaiian",
    ln: "lingala",
    ha: "hausa",
    ba: "bashkir",
    jw: "javanese",
    su: "sundanese",
};
// Dropdown for choosing a Whisper output language. The controlled value is
// the two-letter language code; the visible labels are the title-cased names
// from LANGUAGES. Any extra props are forwarded to the underlying <select>.
function LanguageSelector({ language, setLanguage, ...props }) {
    return (
        <select
            {...props}
            value={language}
            onChange={(event) => setLanguage(event.target.value)}
        >
            {Object.entries(LANGUAGES).map(([code, name]) => (
                <option key={code} value={code}>
                    {titleCase(name)}
                </option>
            ))}
        </select>
    );
}
export default LanguageSelector
transformers.js/examples/whisper-word-timestamps/src/components/LanguageSelector.jsx/0
{ "file_path": "transformers.js/examples/whisper-word-timestamps/src/components/LanguageSelector.jsx", "repo_id": "transformers.js", "token_count": 1603 }
363
import { useState, useRef, useEffect, useCallback } from 'react' import './App.css' const PLACEHOLDER_REVIEWS = [ // battery/charging problems "Disappointed with the battery life! The phone barely lasts half a day with regular use. Considering how much I paid for it, I expected better performance in this department.", "I bought this phone a week ago, and I'm already frustrated with the battery life. It barely lasts half a day with normal usage. I expected more from a supposedly high-end device", "The charging port is so finicky. Sometimes it takes forever to charge, and other times it doesn't even recognize the charger. Frustrating experience!", // overheating "This phone heats up way too quickly, especially when using demanding apps. It's uncomfortable to hold, and I'm concerned it might damage the internal components over time. Not what I expected", "This phone is like holding a hot potato. Video calls turn it into a scalding nightmare. Seriously, can't it keep its cool?", "Forget about a heatwave outside; my phone's got its own. It's like a little portable heater. Not what I signed up for.", // poor build quality "I dropped the phone from a short distance, and the screen cracked easily. Not as durable as I expected from a flagship device.", "Took a slight bump in my bag, and the frame got dinged. Are we back in the flip phone era?", "So, my phone's been in my pocket with just keys โ€“ no ninja moves or anything. Still, it managed to get some scratches. Disappointed with the build quality.", // software "The software updates are a nightmare. Each update seems to introduce new bugs, and it takes forever for them to be fixed.", "Constant crashes and freezes make me want to throw it into a black hole.", "Every time I open Instagram, my phone freezes and crashes. It's so frustrating!", // other "I'm not sure what to make of this phone. It's not bad, but it's not great either. I'm on the fence about it.", "I hate the color of this phone. 
It's so ugly!", "This phone sucks! I'm returning it." ].sort(() => Math.random() - 0.5) const PLACEHOLDER_SECTIONS = [ 'Battery and charging problems', 'Overheating', 'Poor build quality', 'Software issues', 'Other', ]; function App() { const [text, setText] = useState(PLACEHOLDER_REVIEWS.join('\n')); const [sections, setSections] = useState( PLACEHOLDER_SECTIONS.map(title => ({ title, items: [] })) ); const [status, setStatus] = useState('idle'); // Create a reference to the worker object. const worker = useRef(null); // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted. useEffect(() => { if (!worker.current) { // Create the worker if it does not yet exist. worker.current = new Worker(new URL('./worker.js', import.meta.url), { type: 'module' }); } // Create a callback function for messages from the worker thread. const onMessageReceived = (e) => { const status = e.data.status; if (status === 'initiate') { setStatus('loading'); } else if (status === 'ready') { setStatus('ready'); } else if (status === 'output') { const { sequence, labels, scores } = e.data.output; // Threshold for classification const label = scores[0] > 0.5 ? labels[0] : 'Other'; const sectionID = sections.map(x => x.title).indexOf(label) ?? sections.length - 1; setSections((sections) => { const newSections = [...sections] newSections[sectionID] = { ...newSections[sectionID], items: [...newSections[sectionID].items, sequence] } return newSections }) } else if (status === 'complete') { setStatus('idle'); } }; // Attach the callback function as an event listener. worker.current.addEventListener('message', onMessageReceived); // Define a cleanup function for when the component is unmounted. 
return () => worker.current.removeEventListener('message', onMessageReceived); }, [sections]); const classify = useCallback(() => { setStatus('processing'); worker.current.postMessage({ text, labels: sections.slice(0, sections.length - 1).map(section => section.title) }); }, [text, sections]) const busy = status !== 'idle'; return ( <div className='flex flex-col h-full'> <textarea className='border w-full p-1 h-1/2' value={text} onChange={e => setText(e.target.value)} ></textarea> <div className='flex flex-col justify-center items-center m-2 gap-1'> <button className='border py-1 px-2 bg-blue-400 rounded text-white text-lg font-medium disabled:opacity-50 disabled:cursor-not-allowed' disabled={busy} onClick={classify}>{ !busy ? 'Categorize' : status === 'loading' ? 'Model loading...' : 'Processing' }</button> <div className='flex gap-1'> <button className='border py-1 px-2 bg-green-400 rounded text-white text-sm font-medium' onClick={e => { setSections((sections) => { const newSections = [...sections]; // add at position 2 from the end newSections.splice(newSections.length - 1, 0, { title: 'New Category', items: [], }) return newSections; }) }}>Add category</button> <button className='border py-1 px-2 bg-red-400 rounded text-white text-sm font-medium' disabled={sections.length <= 1} onClick={e => { setSections((sections) => { const newSections = [...sections]; newSections.splice(newSections.length - 2, 1); // Remove second last element return newSections; }) }}>Remove category</button> <button className='border py-1 px-2 bg-orange-400 rounded text-white text-sm font-medium' onClick={e => { setSections((sections) => (sections.map(section => ({ ...section, items: [], })))) }}>Clear</button> </div> </div> <div className='flex justify-between flex-grow overflow-x-auto max-h-[40%]'> {sections.map((section, index) => ( <div key={index} className="flex flex-col w-full" > <input disabled={section.title === 'Other'} className="w-full border px-1 text-center" 
value={section.title} onChange={e => { setSections((sections) => { const newSections = [...sections]; newSections[index].title = e.target.value; return newSections; }) }}></input> <div className="overflow-y-auto h-full border"> {section.items.map((item, index) => ( <div className="m-2 border bg-red-50 rounded p-1 text-sm" key={index}>{item} </div> ))} </div> </div> ))} </div> </div> ) } export default App
transformers.js/examples/zero-shot-classification/src/App.jsx/0
{ "file_path": "transformers.js/examples/zero-shot-classification/src/App.jsx", "repo_id": "transformers.js", "token_count": 3044 }
364
/**
 * @module generation/logits_sampler
 */

import { Callable } from "../utils/generic.js";
import { Tensor, topk } from "../utils/tensor.js";

import {
    max,
    softmax,
} from '../utils/maths.js';
import { GenerationConfig } from '../generation/configuration_utils.js';

/**
 * Sampler is a base class for all sampling methods used for text generation.
 * Subclasses implement `sample()`; callers invoke the instance directly
 * (via `Callable`), which forwards to `sample()`.
 */
export class LogitsSampler extends Callable {
    /**
     * Creates a new Sampler object with the specified generation config.
     * @param {GenerationConfig} generation_config The generation config.
     */
    constructor(generation_config) {
        super();
        this.generation_config = generation_config;
    }

    /**
     * Executes the sampler, using the specified logits.
     * @param {Tensor} logits
     * @returns {Promise<[bigint, number][]>} Array of [token id, log-probability score] pairs.
     */
    async _call(logits) {
        // Sample from logits, of dims [batch, sequence_length, vocab_size].
        // If index is specified, sample from [batch, index, vocab_size].
        return this.sample(logits);
    }

    /**
     * Abstract method for sampling the logits.
     * @param {Tensor} logits
     * @throws {Error} If not implemented in subclass.
     * @returns {Promise<[bigint, number][]>}
     */
    async sample(logits) {
        throw Error("sample should be implemented in subclasses.")
    }

    /**
     * Returns the specified logits as an array, with temperature applied.
     * Slices one vocab-sized row out of a flat logits buffer; `index === -1`
     * selects the last row.
     * NOTE(review): not called anywhere in this module; presumably kept for
     * subclasses/external callers — confirm before removing.
     * @param {Tensor} logits
     * @param {number} index
     * @returns {Float32Array}
     */
    getLogits(logits, index) {
        let vocabSize = logits.dims.at(-1);

        let logs = /** @type {Float32Array} */(logits.data);

        if (index === -1) {
            logs = logs.slice(-vocabSize);
        } else {
            let startIndex = index * vocabSize;
            logs = logs.slice(startIndex, startIndex + vocabSize);
        }
        return logs;
    }

    /**
     * Selects an item randomly based on the specified probabilities.
     * The weights do not need to be normalized: the sum is computed here,
     * so any non-negative weights work.
     * @param {import("../transformers.js").DataArray} probabilities An array of probabilities to use for selection.
     * @returns {number} The index of the selected item.
     */
    randomSelect(probabilities) {
        // Return index of chosen item
        let sumProbabilities = 0;
        for (let i = 0; i < probabilities.length; ++i) {
            sumProbabilities += probabilities[i];
        }

        let r = Math.random() * sumProbabilities;
        for (let i = 0; i < probabilities.length; ++i) {
            r -= probabilities[i];
            if (r <= 0) {
                return i;
            }
        }
        return 0; // return first (most probable) as a fallback
    }

    /**
     * Returns a Sampler object based on the specified options.
     * NOTE: `do_sample` takes precedence over `num_beams` in the branching
     * below — multinomial sampling is chosen for any `do_sample=true` config.
     * @param {GenerationConfig} generation_config An object containing options for the sampler.
     * @returns {LogitsSampler} A Sampler object.
     */
    static getSampler(generation_config) {
        // - *greedy decoding*: `num_beams=1` and `do_sample=False`
        // - *contrastive search*: `penalty_alpha>0` and `top_k>1`
        // - *multinomial sampling*: `num_beams=1` and `do_sample=True`
        // - *beam-search decoding*: `num_beams>1` and `do_sample=False`
        // - *beam-search multinomial sampling*: `num_beams>1` and `do_sample=True`
        // - *diverse beam-search decoding*: `num_beams>1` and `num_beam_groups>1`
        // - *constrained beam-search decoding*: `constraints!=None` or `force_words_ids!=None`

        // NOTE: beam search is implemented directly into the generation function
        if (generation_config.do_sample) {
            return new MultinomialSampler(generation_config);

        } else if (generation_config.num_beams > 1) {
            return new BeamSearchSampler(generation_config);

        } else {
            if (generation_config.num_return_sequences > 1) {
                throw Error(`num_return_sequences has to be 1 when doing greedy search, but is ${generation_config.num_return_sequences}.`)
            }
            return new GreedySampler(generation_config);
        }
    }
}

/**
 * Class representing a Greedy Sampler.
 */
class GreedySampler extends LogitsSampler {
    /**
     * Sample the maximum probability of a given logits tensor.
     * @param {Tensor} logits
     * @returns {Promise<[bigint, number][]>} An array with a single tuple, containing the index of the maximum value and a meaningless score (since this is a greedy search).
     */
    async sample(logits) {
        // NOTE: no need to do log_softmax here since we only take the maximum
        const argmax = max(logits.data)[1];

        // Note: score is meaningless in this context, since we are performing
        // greedy search (p = 1 => log(p) = 0)
        return [
            [BigInt(argmax), 0]
        ];
    }
}

/**
 * Class representing a MultinomialSampler.
 */
class MultinomialSampler extends LogitsSampler {
    /**
     * Sample from the logits.
     * Draws `num_beams` independent samples from the (optionally top-k
     * restricted) softmax distribution.
     * @param {Tensor} logits
     * @returns {Promise<[bigint, number][]>}
     */
    async sample(logits) {
        let k = logits.dims.at(-1); // defaults to vocab size
        if (this.generation_config.top_k > 0) {
            k = Math.min(this.generation_config.top_k, k);
        }

        // Get top k tokens
        const [v, i] = await topk(logits, k);

        // Compute softmax over logits
        const probabilities = softmax(/** @type {Float32Array} */(v.data));

        return Array.from({ length: this.generation_config.num_beams }, () => {
            const sampledIndex = this.randomSelect(probabilities);
            return [
                i.data[sampledIndex], // token id
                Math.log(probabilities[sampledIndex]), // score
            ];
        });
    }
}

/**
 * Class representing a BeamSearchSampler.
 */
class BeamSearchSampler extends LogitsSampler {
    /**
     * Sample from the logits.
     * Deterministically returns the `num_beams` highest-probability tokens
     * (the x-th beam gets the x-th best token).
     * @param {Tensor} logits
     * @returns {Promise<[bigint, number][]>}
     */
    async sample(logits) {
        let k = logits.dims.at(-1); // defaults to vocab size
        if (this.generation_config.top_k > 0) {
            k = Math.min(this.generation_config.top_k, k);
        }

        // Get top k tokens
        const [v, i] = await topk(logits, k);

        // Compute softmax over logits
        const probabilities = softmax(/** @type {Float32Array} */(v.data));

        return Array.from({ length: this.generation_config.num_beams }, (_, x) => {
            return [
                i.data[x], // token id
                Math.log(probabilities[x]), // score
            ];
        });
    }
}
{ "file_path": "transformers.js/src/generation/logits_sampler.js", "repo_id": "transformers.js", "token_count": 2735 }
365
import {
    ImageProcessor,
} from "../../base/image_processors_utils.js";

/**
 * Image processor for DeiT models. All preprocessing behavior is inherited
 * unchanged from the base `ImageProcessor` (configured via the model's
 * preprocessor config).
 */
export class DeiTImageProcessor extends ImageProcessor { }

/**
 * Alias of {@link DeiTImageProcessor}.
 * NOTE(review): presumably kept so configs referencing the legacy
 * "DeiTFeatureExtractor" class name still resolve — confirm before removing.
 */
export class DeiTFeatureExtractor extends DeiTImageProcessor { }
transformers.js/src/models/deit/image_processing_deit.js/0
{ "file_path": "transformers.js/src/models/deit/image_processing_deit.js", "repo_id": "transformers.js", "token_count": 63 }
366
/**
 * @file Barrel module: re-exports every model-specific image processor so
 * consumers (e.g. the auto image-processor resolver) can import them all
 * from a single entry point.
 *
 * NOTE(review): entries appear to be in ASCII-alphabetical order of model
 * directory — keep them sorted when adding a new model.
 */
export * from './beit/image_processing_beit.js'
export * from './bit/image_processing_bit.js'
export * from './chinese_clip/image_processing_chinese_clip.js'
export * from './clip/image_processing_clip.js'
export * from './convnext/image_processing_convnext.js'
export * from './deit/image_processing_deit.js'
export * from './detr/image_processing_detr.js'
export * from './dinov3_vit/image_processing_dinov3_vit.js'
export * from './donut/image_processing_donut.js'
export * from './dpt/image_processing_dpt.js'
export * from './efficientnet/image_processing_efficientnet.js'
export * from './glpn/image_processing_glpn.js'
export * from './grounding_dino/image_processing_grounding_dino.js'
export * from './idefics3/image_processing_idefics3.js'
export * from './janus/image_processing_janus.js'
export * from './jina_clip/image_processing_jina_clip.js'
export * from './llava_onevision/image_processing_llava_onevision.js'
export * from './mask2former/image_processing_mask2former.js'
export * from './maskformer/image_processing_maskformer.js'
export * from './mobilenet_v1/image_processing_mobilenet_v1.js'
export * from './mobilenet_v2/image_processing_mobilenet_v2.js'
export * from './mobilenet_v3/image_processing_mobilenet_v3.js'
export * from './mobilenet_v4/image_processing_mobilenet_v4.js'
export * from './mobilevit/image_processing_mobilevit.js'
export * from './nougat/image_processing_nougat.js'
export * from './owlv2/image_processing_owlv2.js'
export * from './owlvit/image_processing_owlvit.js'
export * from './phi3_v/image_processing_phi3_v.js'
export * from './pvt/image_processing_pvt.js'
export * from './qwen2_vl/image_processing_qwen2_vl.js'
export * from './rt_detr/image_processing_rt_detr.js'
export * from './sam/image_processing_sam.js'
export * from './segformer/image_processing_segformer.js'
export * from './siglip/image_processing_siglip.js'
export * from './smolvlm/image_processing_smolvlm.js'
export * from './swin2sr/image_processing_swin2sr.js'
export * from './vit/image_processing_vit.js'
export * from './vitmatte/image_processing_vitmatte.js'
export * from './vitpose/image_processing_vitpose.js'
export * from './yolos/image_processing_yolos.js'
transformers.js/src/models/image_processors.js/0
{ "file_path": "transformers.js/src/models/image_processors.js", "repo_id": "transformers.js", "token_count": 829 }
367
import { Processor } from "../../base/processing_utils.js";
import { AutoImageProcessor } from "../auto/image_processing_auto.js";

/**
 * Processor for SAM (Segment Anything). A thin wrapper around the model's
 * image processor: every method here simply delegates to
 * `this.image_processor`.
 */
export class SamProcessor extends Processor {
    // Resolved dynamically from the model's preprocessor config.
    static image_processor_class = AutoImageProcessor

    /**
     * Preprocesses the inputs by forwarding all arguments to the image processor.
     * @param {...any} args Arguments accepted by the underlying image processor.
     * @returns {Promise<any>} The image processor's output.
     */
    async _call(...args) {
        return await this.image_processor(...args);
    }

    /**
     * Delegates mask post-processing to the image processor.
     * @param {...any} args Arguments accepted by the image processor's `post_process_masks`.
     */
    post_process_masks(...args) {
        // @ts-ignore
        return this.image_processor.post_process_masks(...args);
    }

    /**
     * Delegates input-point reshaping to the image processor.
     * @param {...any} args Arguments accepted by the image processor's `reshape_input_points`.
     */
    reshape_input_points(...args) {
        // @ts-ignore
        return this.image_processor.reshape_input_points(...args);
    }
}
{ "file_path": "transformers.js/src/models/sam/processing_sam.js", "repo_id": "transformers.js", "token_count": 216 }
368
import { FeatureExtractor, validate_audio_inputs } from "../../base/feature_extraction_utils.js";
import { Tensor } from "../../utils/tensor.js";

/**
 * Feature extractor for Wav2Vec2 models: optionally normalizes the raw
 * waveform to zero mean / unit variance and wraps it in Tensors.
 */
export class Wav2Vec2FeatureExtractor extends FeatureExtractor {
    /**
     * Normalize a waveform to zero mean and unit variance.
     * @param {Float32Array} input_values
     * @returns {Float32Array}
     */
    _zero_mean_unit_var_norm(input_values) {
        // TODO support batch?
        const n = input_values.length;

        let total = 0;
        for (let idx = 0; idx < n; ++idx) {
            total += input_values[idx];
        }
        const mean = total / n;

        let squaredDeviation = 0;
        for (let idx = 0; idx < n; ++idx) {
            const d = input_values[idx] - mean;
            squaredDeviation += d * d;
        }
        const variance = squaredDeviation / n;

        // Small epsilon guards against division by zero for silent audio.
        const scale = Math.sqrt(variance + 1e-7);
        return input_values.map(x => (x - mean) / scale);
    }

    /**
     * Asynchronously extracts features from a given audio using the provided configuration.
     * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
     * @returns {Promise<{ input_values: Tensor; attention_mask: Tensor }>} A Promise resolving to an object containing the extracted input features and attention mask as Tensors.
     */
    async _call(audio) {
        validate_audio_inputs(audio, 'Wav2Vec2FeatureExtractor');

        // The model expects float32 input.
        let input_values = audio instanceof Float64Array
            ? new Float32Array(audio)
            : audio;

        // zero-mean and unit-variance normalization
        if (this.config.do_normalize) {
            input_values = this._zero_mean_unit_var_norm(input_values);
        }

        // TODO: allow user to pass in attention mask
        const shape = [1, input_values.length];
        const attention_mask = new BigInt64Array(input_values.length).fill(1n);

        return {
            input_values: new Tensor('float32', input_values, shape),
            attention_mask: new Tensor('int64', attention_mask, shape),
        };
    }
}
transformers.js/src/models/wav2vec2/feature_extraction_wav2vec2.js/0
{ "file_path": "transformers.js/src/models/wav2vec2/feature_extraction_wav2vec2.js", "repo_id": "transformers.js", "token_count": 700 }
369
/**
 * @file Custom data structures.
 *
 * These are only used internally, meaning an end-user shouldn't
 * need to access anything here.
 *
 * @module utils/data-structures
 */


/**
 * Efficient Heap-based Implementation of a Priority Queue.
 * It uses an array-based binary heap, where the root is at index `0`, and the
 * children of node `i` are located at indices `2i + 1` and `2i + 2`, respectively.
 *
 * Adapted from the following sources:
 * - https://stackoverflow.com/a/42919752/13989043 (original)
 * - https://github.com/belladoreai/llama-tokenizer-js (minor improvements)
 */
export class PriorityQueue {

    /**
     * Create a new PriorityQueue.
     * @param {function(any, any): boolean} comparator Comparator function to determine priority. Defaults to a MaxHeap.
     * @param {number} maxSize Maximum number of elements retained; when full, new elements
     * evict the lowest-priority element (if higher priority). Defaults to unbounded.
     */
    constructor(comparator = (a, b) => a > b, maxSize = Infinity) {
        this._heap = [];
        this._comparator = comparator;
        this._maxSize = maxSize;
    }

    /**
     * The size of the queue
     */
    get size() {
        return this._heap.length;
    }

    /**
     * Check if the queue is empty.
     * @returns {boolean} `true` if the queue is empty, `false` otherwise.
     */
    isEmpty() {
        return this.size === 0;
    }

    /**
     * Return the element with the highest priority in the queue.
     * @returns {any} The highest priority element in the queue.
     */
    peek() {
        return this._heap[0];
    }

    /**
     * Add one or more elements to the queue.
     * @param {...any} values The values to push into the queue.
     * @returns {number} The new size of the queue.
     */
    push(...values) {
        return this.extend(values);
    }

    /**
     * Add multiple elements to the queue. When the queue is at `maxSize`,
     * a new value is only admitted if it outranks the current lowest-priority
     * element, which it then replaces.
     * @param {any[]} values The values to push into the queue.
     * @returns {number} The new size of the queue.
     */
    extend(values) {
        for (const value of values) {
            if (this.size < this._maxSize) {
                this._heap.push(value);
                this._siftUp();
            } else {
                // Get index of value with the lowest priority
                const smallest = this._smallest();

                // If the new value has higher priority than the smallest value in the heap
                // then replace the smallest value with the new value and update the heap
                if (this._comparator(value, this._heap[smallest])) {
                    this._heap[smallest] = value;
                    this._siftUpFrom(smallest);
                }
            }
        }
        return this.size;
    }

    /**
     * Remove and return the element with the highest priority in the queue.
     * @returns {any} The element with the highest priority in the queue.
     */
    pop() {
        const poppedValue = this.peek();
        const bottom = this.size - 1;
        if (bottom > 0) {
            this._swap(0, bottom);
        }
        this._heap.pop();
        this._siftDown();
        return poppedValue;
    }

    /**
     * Replace the element with the highest priority in the queue with a new value.
     * @param {*} value The new value.
     * @returns {*} The replaced value.
     */
    replace(value) {
        const replacedValue = this.peek();
        this._heap[0] = value;
        this._siftDown();
        return replacedValue;
    }

    /**
     * Compute the index for the parent of the node at index `i`.
     * @param {number} i The index of the node to get the parent of.
     * @returns {number} The index of the parent node.
     * @private
     */
    _parent(i) {
        return ((i + 1) >>> 1) - 1;
    }

    /**
     * Compute the index for the left child of the node at index `i`.
     * @param {number} i The index of the node to get the left child of.
     * @returns {number} The index of the left child.
     * @private
     */
    _left(i) {
        return (i << 1) + 1;
    }

    /**
     * Compute the index for the right child of the node at index `i`.
     * @param {number} i The index of the node to get the right child of.
     * @returns {number} The index of the right child.
     * @private
     */
    _right(i) {
        return (i + 1) << 1;
    }

    /**
     * Check if the element at index `i` is greater than the element at index `j`.
     * @param {number} i The index of the first element to compare.
     * @param {number} j The index of the second element to compare.
     * @returns {boolean} `true` if the element at index `i` is greater than the element at index `j`, `false` otherwise.
     * @private
     */
    _greater(i, j) {
        return this._comparator(this._heap[i], this._heap[j]);
    }

    /**
     * Swap the elements at indices `i` and `j`.
     * @param {number} i The index of the first element to swap.
     * @param {number} j The index of the second element to swap.
     * @private
     */
    _swap(i, j) {
        const temp = this._heap[i];
        this._heap[i] = this._heap[j];
        this._heap[j] = temp;
    }

    /**
     * Maintain the heap property by updating positions in the heap,
     * starting at the last element and moving up the heap.
     * @private
     */
    _siftUp() {
        this._siftUpFrom(this.size - 1);
    }

    /**
     * Helper function to sift up from a given node.
     * @param {number} node The index of the node to start sifting up from.
     */
    _siftUpFrom(node) {
        while (node > 0 && this._greater(node, this._parent(node))) {
            this._swap(node, this._parent(node));
            node = this._parent(node);
        }
    }

    /**
     * Maintain the heap property by updating positions in the heap,
     * starting at the first element and moving down the heap.
     * @private
     */
    _siftDown() {
        let node = 0;
        while (
            (this._left(node) < this.size && this._greater(this._left(node), node)) ||
            (this._right(node) < this.size && this._greater(this._right(node), node))
        ) {
            const maxChild = (this._right(node) < this.size && this._greater(this._right(node), this._left(node)))
                ? this._right(node)
                : this._left(node);
            this._swap(node, maxChild);
            node = maxChild;
        }
    }

    /**
     * Get the index of the lowest-priority element in the heap.
     *
     * BUG FIX: the previous implementation returned `2**⌊log2(size)⌋ - 1`
     * (the leftmost node of the bottom level), which is not in general the
     * minimum — so `extend()` on a full queue could reject a value that
     * should have evicted the true minimum. The minimum of a heap always
     * lies among the leaves, so we scan the leaf range [⌊size/2⌋, size).
     * This is O(size/2), only paid when the queue is at `maxSize`.
     * @returns {number} Index of the lowest-priority element.
     * @private
     */
    _smallest() {
        if (this.size < 2) return 0;
        // First leaf is the child index right after the last internal node.
        const firstLeaf = this._parent(this.size - 1) + 1;
        let smallest = firstLeaf;
        for (let i = firstLeaf + 1; i < this.size; ++i) {
            // If current "smallest" outranks heap[i], then heap[i] is lower priority.
            if (this._comparator(this._heap[smallest], this._heap[i])) {
                smallest = i;
            }
        }
        return smallest;
    }
}

/**
 * A trie structure to efficiently store and search for strings.
 */
export class CharTrie {
    constructor() {
        this.root = CharTrieNode.default();
    }

    /**
     * Adds one or more `texts` to the trie.
     * @param {string[]} texts The strings to add to the trie.
     */
    extend(texts) {
        for (const text of texts) {
            this.push(text);
        }
    }

    /**
     * Adds text to the trie.
     * @param {string} text The string to add to the trie.
     */
    push(text) {
        let node = this.root;
        for (const ch of text) {
            let child = node.children.get(ch);
            if (child === undefined) {
                child = CharTrieNode.default();
                node.children.set(ch, child);
            }
            node = child;
        }
        node.isLeaf = true;
    }

    /**
     * Searches the trie for all strings with a common prefix of `text`.
     * @param {string} text The common prefix to search for.
     * @yields {string} Each string in the trie that has `text` as a prefix.
     */
    *commonPrefixSearch(text) {
        let node = this.root;
        if (node === undefined) return;

        let prefix = "";
        for (const ch of text) {
            prefix += ch;
            node = node.children.get(ch);
            if (node === undefined) return;
            if (node.isLeaf) {
                yield prefix;
            }
        }
    }
}

/**
 * Represents a node in a character trie.
 */
class CharTrieNode {
    /**
     * Create a new CharTrieNode.
     * @param {boolean} isLeaf Whether the node is a leaf node or not.
     * @param {Map<string, CharTrieNode>} children A map containing the node's children, where the key is a character and the value is a `CharTrieNode`.
     */
    constructor(isLeaf, children) {
        this.isLeaf = isLeaf;
        this.children = children;
    }

    /**
     * Returns a new `CharTrieNode` instance with default values.
     * @returns {CharTrieNode} A new `CharTrieNode` instance with `isLeaf` set to `false` and an empty `children` map.
     */
    static default() {
        return new CharTrieNode(false, new Map());
    }
}

/**
 * A lattice data structure to be used for tokenization.
 */
export class TokenLattice {
    /**
     * Creates a new TokenLattice instance.
     *
     * @param {string} sentence The input sentence to be tokenized.
     * @param {number} bosTokenId The beginning-of-sequence token ID.
     * @param {number} eosTokenId The end-of-sequence token ID.
     */
    constructor(sentence, bosTokenId, eosTokenId) {
        this.chars = Array.from(sentence);
        this.len = this.chars.length;
        this.bosTokenId = bosTokenId;
        this.eosTokenId = eosTokenId;
        this.nodes = [];
        // beginNodes[i]: nodes whose token starts at position i;
        // endNodes[i]: nodes whose token ends just before position i.
        this.beginNodes = Array.from({ length: this.len + 1 }, () => []);
        this.endNodes = Array.from({ length: this.len + 1 }, () => []);

        const bos = new TokenLatticeNode(this.bosTokenId, 0, 0, 0, 0.0);
        const eos = new TokenLatticeNode(this.eosTokenId, 1, this.len, 0, 0.0);
        this.nodes.push(bos.clone());
        this.nodes.push(eos.clone());
        this.beginNodes[this.len].push(eos);
        this.endNodes[0].push(bos);
    }

    /**
     * Inserts a new token node into the token lattice.
     *
     * @param {number} pos The starting position of the token.
     * @param {number} length The length of the token.
     * @param {number} score The score of the token.
     * @param {number} tokenId The token ID of the token.
     */
    insert(pos, length, score, tokenId) {
        const nodeId = this.nodes.length;
        const node = new TokenLatticeNode(tokenId, nodeId, pos, length, score);
        this.beginNodes[pos].push(node);
        this.endNodes[pos + length].push(node);
        this.nodes.push(node);
    }

    /**
     * Implements the Viterbi algorithm to compute the most likely sequence of tokens.
     *
     * @returns {TokenLatticeNode[]} The most likely sequence of tokens.
     */
    viterbi() {
        const len = this.len;
        let pos = 0;
        while (pos <= len) {
            if (this.beginNodes[pos].length == 0) {
                return [];
            }
            for (let rnode of this.beginNodes[pos]) {
                rnode.prev = null;
                let bestScore = 0.0;
                let bestNode = null;
                for (let lnode of this.endNodes[pos]) {
                    const score = lnode.backtraceScore + rnode.score;
                    if (bestNode === null || score > bestScore) {
                        bestNode = lnode.clone();
                        bestScore = score;
                    }
                }

                if (bestNode !== null) {
                    rnode.prev = bestNode;
                    rnode.backtraceScore = bestScore;
                } else {
                    return [];
                }
            }
            ++pos;
        }

        // Backtrace from the EOS node to recover the best path.
        const results = [];
        const root = this.beginNodes[len][0];
        const prev = root.prev;
        if (prev === null) {
            return [];
        }

        let node = prev.clone();
        while (node.prev !== null) {
            results.push(node.clone());
            const n = node.clone();
            node = n.prev.clone();
        }

        results.reverse();
        return results;
    }

    /**
     * @param {TokenLatticeNode} node
     * @returns {string} The substring of the sentence covered by `node`.
     */
    piece(node) {
        return this.chars.slice(node.pos, node.pos + node.length).join('');
    }

    /**
     * @returns {string[]} The most likely sequence of tokens.
     */
    tokens() {
        const nodes = this.viterbi();
        return nodes.map(x => this.piece(x));
    }

    /**
     * @returns {number[]} The most likely sequence of token ids.
     */
    tokenIds() {
        const nodes = this.viterbi();
        return nodes.map(x => x.tokenId);
    }
}

class TokenLatticeNode {
    /**
     * Represents a node in a token lattice for a given sentence.
     * @param {number} tokenId The ID of the token associated with this node.
     * @param {number} nodeId The ID of this node.
     * @param {number} pos The starting position of the token in the sentence.
     * @param {number} length The length of the token.
     * @param {number} score The score associated with the token.
     */
    constructor(tokenId, nodeId, pos, length, score) {
        this.tokenId = tokenId;
        this.nodeId = nodeId;
        this.pos = pos;
        this.length = length;
        this.score = score;
        this.prev = null;
        this.backtraceScore = 0.0;
    }

    /**
     * Returns a clone of this node.
     * @returns {TokenLatticeNode} A clone of this node.
     */
    clone() {
        const n = new TokenLatticeNode(this.tokenId, this.nodeId, this.pos, this.length, this.score);
        n.prev = this.prev;
        n.backtraceScore = this.backtraceScore;
        return n;
    }
}

/**
 * A data structure which uses a trie to split a string into tokens based on a dictionary.
 * It can also use a regular expression to preprocess the input text before splitting.
 *
 * NOTE(review): despite the original "byte-level" note, this indexes the input
 * string directly, i.e. it operates on UTF-16 code units; callers that need
 * byte-level behavior must pass byte-encoded strings — confirm at call sites.
 */
export class DictionarySplitter {
    /**
     * @param {string[]} dictionary The dictionary of words to use for splitting.
     */
    constructor(dictionary) {
        this.trie = this._buildTrie(dictionary);
    }

    /**
     * Builds a trie from the given dictionary.
     * @param {string[]} dictionary The dictionary of words to build the trie from.
     * @returns {Object} The root node of the trie.
     * @private
     */
    _buildTrie(dictionary) {
        const trie = Object.create(null);
        for (const word of dictionary) {
            let node = trie;
            for (let i = 0; i < word.length; ++i) {
                node = (node[word[i]] ??= Object.create(null));
            }
            // Terminal marker: stores the full matched word.
            node.end = word;
        }
        return trie;
    }

    /**
     * Splits the input text into tokens based on the dictionary,
     * preferring the longest dictionary match at each position.
     * @param {string} text The input text to split.
     * @returns {string[]} An array of tokens.
     */
    split(text) {
        const result = [];
        const n = text.length;
        let start = 0;
        let i = 0;

        while (i < n) {
            let node = this.trie;
            let match = null;
            let j = i;

            while (j < n && (node = node[text[j]])) {
                if (node.end) {
                    // Always keep the last (i.e., longest) match.
                    match = node.end;
                }
                ++j;
            }

            if (match) {
                // Flush any unmatched text preceding the match.
                if (i > start) {
                    result.push(text.slice(start, i));
                }
                result.push(match);
                i += match.length;
                start = i;
            } else {
                ++i;
            }
        }
        if (start < n) {
            result.push(text.slice(start));
        }
        return result;
    }
}

/**
 * A simple Least Recently Used (LRU) cache implementation in JavaScript.
 * This cache stores key-value pairs and evicts the least recently used item
 * when the capacity is exceeded.
 */
export class LRUCache {
    /**
     * Creates an LRUCache instance.
     * @param {number} capacity The maximum number of items the cache can hold.
     */
    constructor(capacity) {
        this.capacity = capacity;
        // Map preserves insertion order; the first key is the least recently used.
        this.cache = new Map();
    }

    /**
     * Retrieves the value associated with the given key and marks the key as recently used.
     * @param {any} key The key to retrieve.
     * @returns {any} The value associated with the key, or undefined if the key does not exist.
     */
    get(key) {
        if (!this.cache.has(key)) return undefined;
        const value = this.cache.get(key);
        // Re-insert to move the key to the most-recently-used position.
        this.cache.delete(key);
        this.cache.set(key, value);
        return value;
    }

    /**
     * Inserts or updates the key-value pair in the cache.
     * If the key already exists, it is updated and marked as recently used.
     * If the cache exceeds its capacity, the least recently used item is evicted.
     * @param {any} key The key to add or update.
     * @param {any} value The value to associate with the key.
     */
    put(key, value) {
        if (this.cache.has(key)) {
            this.cache.delete(key);
        }
        this.cache.set(key, value);
        if (this.cache.size > this.capacity) {
            this.cache.delete(this.cache.keys().next().value);
        }
    }

    /**
     * Clears the cache.
     */
    clear() {
        this.cache.clear();
    }
}
{ "file_path": "transformers.js/src/utils/data-structures.js", "repo_id": "transformers.js", "token_count": 7599 }
370
import { AutoImageProcessor, Qwen2VLImageProcessor } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { // Qwen2VLImageProcessor // - custom image processing (min_pixels, max_pixels) describe("Qwen2VLImageProcessor", () => { const model_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration"; /** @type {Qwen2VLImageProcessor} */ let processor; beforeAll(async () => { processor = await AutoImageProcessor.from_pretrained(model_id); }, MAX_PROCESSOR_LOAD_TIME); it( "custom image processing", async () => { const image = await load_cached_image("white_image"); const { pixel_values, image_grid_thw, original_sizes, reshaped_input_sizes } = await processor(image); expect(pixel_values.dims).toEqual([256, 1176]); expect(pixel_values.mean().item()).toBeCloseTo(2.050372362136841, 6); expect(image_grid_thw.tolist()).toEqual([[1n, 16n, 16n]]); expect(original_sizes).toEqual([[224, 224]]); expect(reshaped_input_sizes).toEqual([[224, 224]]); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/qwen2_vl/test_image_processing_qwen2_vl.js/0
{ "file_path": "transformers.js/tests/models/qwen2_vl/test_image_processing_qwen2_vl.js", "repo_id": "transformers.js", "token_count": 515 }
371
import { Wav2Vec2CTCTokenizer } from "../../../src/tokenizers.js"; import { BASE_TEST_STRINGS, BERT_TEST_STRINGS } from "../test_strings.js"; export const TOKENIZER_CLASS = Wav2Vec2CTCTokenizer; export const TEST_CONFIG = { "Xenova/wav2vec2-base-960h": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["H", "o", "w", "|", "a", "r", "e", "|", "y", "o", "u", "|", "d", "o", "i", "n", "g", "?"], ids: [11, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3], decoded: "H<unk> <unk> <unk> <unk>", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["Y", "o", "u", "|", "s", "h", "o", "u", "l", "d", "'", "v", "e", "|", "d", "o", "n", "e", "|", "t", "h", "i", "s"], ids: [22, 3, 3, 4, 3, 3, 3, 3, 3, 3, 27, 3, 3, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3], decoded: "Y<unk> <unk>'<unk> <unk> <unk>", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "|", "0", "|", "1", "|", "2", "|", "3", "|", "4", "|", "5", "|", "6", "|", "7", "|", "8", "|", "9", "|", "1", "0", "|", "1", "0", "0", "|", "1", "0", "0", "0"], ids: [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 3], decoded: "<unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["T", "h", "e", "|", "c", "o", "m", "p", "a", "n", "y", "|", "w", "a", "s", "|", "f", "o", "u", "n", "d", "e", "d", "|", "i", "n", "|", "2", "0", "1", "6", "."], ids: [6, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 4, 3, 3, 3, 3, 3], decoded: "T<unk> <unk> <unk> <unk> <unk> <unk>", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\n", "'", "l", "l", "|", "!", "!", "t", "o", "?", "'", "d", "'", "'", "d", "|", "o", "f", ",", "|", "c", "a", "n", "'", "t", "."], ids: [7, 3, 27, 3, 3, 4, 3, 3, 3, 3, 3, 27, 3, 27, 27, 3, 4, 3, 3, 3, 4, 3, 3, 3, 27, 3, 3], 
decoded: "A<unk>'<unk> <unk>'<unk>'<unk> <unk> <unk>'<unk>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["d", "e", "f", "|", "m", "a", "i", "n", "(", ")", ":", "\n", "\t", "p", "a", "s", "s"], ids: [3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "<unk> <unk>", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["l", "e", "t", "|", "a", "|", "=", "|", "o", "b", "j", ".", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";", "\n", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";"], ids: [3, 3, 3, 4, 3, 4, 3, 4, 3, 3, 3, 3, 3, 3, 12, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 12, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "<unk> <unk> <unk> <unk>S<unk>S<unk>", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["T", "h", "i", "s", "\n", "\n", "i", "s", "\n", "a", "\n", "t", "e", "s", "t", "."], ids: [6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "T<unk>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["U", "N", "w", "a", "n", "t", "\u00e9", "d", ",", "r", "u", "n", "n", "i", "n", "g"], ids: [16, 9, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "UN<unk>", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0000", "2", "\ufffd", "3"], ids: [3, 3, 3, 3, 3], decoded: "<unk>", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["H", "e", "l", "l", "o", "|", "W", "o", "r", "l", "d"], ids: [11, 3, 3, 3, 3, 4, 18, 3, 3, 3, 3], decoded: "H<unk> W<unk>", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["h", "e", "l", "l", "o", "|", "w", "o", "r", "l", "d"], ids: [3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3], decoded: "<unk> <unk>", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u751f", "\u6d3b", "\u7684", "\u771f", "\u8c1b", "\u662f"], ids: [3, 3, 3, 3, 3, 3], decoded: "<unk>", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["|", "|", "|", "l", "e", "a", "d", "i", "n", "g", "|", "s", "p", 
"a", "c", "e"], ids: [4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3], decoded: "<unk> <unk>", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["t", "r", "a", "i", "l", "i", "n", "g", "|", "s", "p", "a", "c", "e", "|", "|", "|"], ids: [3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 4, 4, 4], decoded: "<unk> <unk>", }, SURROUNDING_SPACE: { text: BASE_TEST_STRINGS.SURROUNDING_SPACE, tokens: ["|", "|", "|", "s", "u", "r", "r", "o", "u", "n", "d", "i", "n", "g", "|", "s", "p", "a", "c", "e", "|", "|", "|"], ids: [4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 4, 4, 4], decoded: "<unk> <unk>", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["H", "i", "|", "|", "H", "e", "l", "l", "o"], ids: [11, 3, 4, 4, 11, 3, 3, 3, 3], decoded: "H<unk> H<unk>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["t", "e", "s", "t", "|", "$", "1", "|", "R", "2", "|", "#", "3", "|", "\u20ac", "4", "|", "\u00a3", "5", "|", "\u00a5", "6", "|", "\u20a3", "7", "|", "\u20b9", "8", "|", "\u20b1", "9", "|", "t", "e", "s", "t"], ids: [3, 3, 3, 3, 4, 3, 3, 4, 13, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 3, 3], decoded: "<unk> <unk> R<unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "|", "b", "o", "u", "g", "h", "t", "|", "a", "n", "|", "a", "p", "p", "l", "e", "|", "f", "o", "r", "|", "$", "1", ".", "0", "0", "|", "a", "t", "|", "t", "h", "e", "|", "s", "t", "o", "r", "e", "."], ids: [10, 4, 3, 3, 3, 3, 3, 3, 4, 3, 3, 4, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 3, 3, 4, 3, 3, 4, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3], decoded: "I <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["y", "o", "u", "\u2026", "|", "|"], ids: [3, 3, 3, 3, 4, 4], decoded: "<unk>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["y", "o", "u", 
"\u2026", "\u00a0", "\u00a0"], ids: [3, 3, 3, 3, 3, 3], decoded: "<unk>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["y", "o", "u", "\u2026", "\u00a0", "\u00a0", "y", "o", "u", "\u2026", "\u00a0", "\u00a0"], ids: [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "<unk>", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["w", "e", "i", "r", "d", "|", "\uff5e", "|", "e", "d", "g", "e", "|", "\uff5e", "|", "c", "a", "s", "e"], ids: [3, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 3], decoded: "<unk> <unk> <unk> <unk> <unk>", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u2581", "T", "h", "i", "s", "|", "\u2581", "i", "s", "|", "\u2581", "a", "|", "\u2581", "t", "e", "s", "t", "|", "\u2581", "."], ids: [3, 6, 3, 3, 3, 4, 3, 3, 3, 4, 3, 3, 4, 3, 3, 3, 3, 3, 4, 3, 3], decoded: "<unk>T<unk> <unk> <unk> <unk> <unk>", }, POPULAR_EMOJIS: { text: BASE_TEST_STRINGS.POPULAR_EMOJIS, tokens: ["\ud83d\ude02", "|", "\ud83d\udc4d", "|", "\ud83e\udd23", "|", "\ud83d\ude0d", "|", "\ud83d\ude2d", "|", "\ud83c\udf89", "|", "\ud83d\ude4f", "|", "\ud83d\ude0a", "|", "\ud83d\udd25", "|", "\ud83d\ude01", "|", "\ud83d\ude05", "|", "\ud83e\udd17", "|", "\ud83d\ude06", "|", "\ud83d\udc4f", "|", "\u2764", "\ufe0f", "|", "\ud83d\udc9c", "|", "\ud83d\udc9a", "|", "\ud83d\udc97", "|", "\ud83d\udc99", "|", "\ud83d\udda4", "|", "\ud83d\ude0e", "|", "\ud83d\udc4c", "|", "\ud83e\udd73", "|", "\ud83d\udcaa", "|", "\u2728", "|", "\ud83d\udc49", "|", "\ud83d\udc40", "|", "\ud83d\udcaf", "|", "\ud83c\udf88", "|", "\ud83d\ude48", "|", "\ud83d\ude4c", "|", "\ud83d\udc80", "|", "\ud83d\udc47", "|", "\ud83d\udc4b", "|", "\u2705", "|", "\ud83c\udf81", "|", "\ud83c\udf1e", "|", "\ud83c\udf38", "|", "\ud83d\udcb0"], ids: [3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 
4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3], decoded: "<unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>", }, MULTIBYTE_EMOJIS: { text: BASE_TEST_STRINGS.MULTIBYTE_EMOJIS, tokens: ["\u2728", "|", "\ud83e\udd17", "|", "\ud83d\udc41", "\ufe0f", "|", "\ud83d\udc71", "\ud83c\udffb", "|", "\ud83d\udd75", "\u200d", "\u2642", "\ufe0f", "|", "\ud83e\uddd9", "\ud83c\udffb", "\u200d", "\u2642", "|", "\ud83d\udc68", "\ud83c\udffb", "\u200d", "\ud83c\udf3e", "|", "\ud83e\uddd1", "\u200d", "\ud83e\udd1d", "\u200d", "\ud83e\uddd1", "|", "\ud83d\udc69", "\u200d", "\u2764", "\u200d", "\ud83d\udc8b", "\u200d", "\ud83d\udc68", "|", "\ud83d\udc69", "\u200d", "\ud83d\udc69", "\u200d", "\ud83d\udc67", "\u200d", "\ud83d\udc66", "|", "\ud83e\uddd1", "\ud83c\udffb", "\u200d", "\ud83e\udd1d", "\u200d", "\ud83e\uddd1", "\ud83c\udffb", "|", "\ud83c\udff4", "\udb40\udc67", "\udb40\udc62", "\udb40\udc65", "\udb40\udc6e", "\udb40\udc67", "\udb40\udc7f", "|", "\ud83d\udc68", "\ud83c\udffb", "\u200d", "\u2764", "\ufe0f", "\u200d", "\ud83d\udc8b", "\u200d", "\ud83d\udc68", "\ud83c\udffc"], ids: [3, 4, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], decoded: "<unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>", }, ONLY_WHITESPACE: { text: BASE_TEST_STRINGS.ONLY_WHITESPACE, tokens: ["|", "\t", "\n"], ids: [4, 3, 3], decoded: "<unk>", }, CHINESE_LATIN_MIXED: { text: BERT_TEST_STRINGS.CHINESE_LATIN_MIXED, tokens: ["a", "h", "\u535a", "\u63a8", "z", "z"], ids: [3, 3, 3, 3, 3, 3], decoded: "<unk>", }, SIMPLE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.SIMPLE_WITH_ACCENTS, tokens: ["H", "\u00e9", "l", "l", "o"], ids: [11, 
3, 3, 3, 3], decoded: "H<unk>", }, MIXED_CASE_WITHOUT_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITHOUT_ACCENTS, tokens: ["|", "\t", "H", "e", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 11, 3, 15, 15, 3, 3, 3, 3, 3, 4, 4, 3, 4, 7, 3, 3, 4, 3, 3, 16, 3, 4, 4], decoded: "<unk>H<unk>L<unk> <unk> A<unk> <unk>U<unk>", }, MIXED_CASE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITH_ACCENTS, tokens: ["|", "\t", "H", "\u00e4", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 11, 3, 15, 15, 3, 3, 3, 3, 3, 4, 4, 3, 4, 7, 3, 3, 4, 3, 3, 16, 3, 4, 4], decoded: "<unk>H<unk>L<unk> <unk> A<unk> <unk>U<unk>", }, }, "Xenova/wav2vec2-large-xlsr-53-english": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["H", "o", "w", "|", "a", "r", "e", "|", "y", "o", "u", "|", "d", "o", "i", "n", "g", "?"], ids: [3, 21, 29, 4, 7, 24, 11, 4, 31, 21, 27, 4, 10, 21, 15, 20, 13, 3], decoded: "<unk>ow are you doing<unk>", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["Y", "o", "u", "|", "s", "h", "o", "u", "l", "d", "'", "v", "e", "|", "d", "o", "n", "e", "|", "t", "h", "i", "s"], ids: [3, 21, 27, 4, 25, 14, 21, 27, 18, 10, 5, 28, 11, 4, 10, 21, 20, 11, 4, 26, 14, 15, 25], decoded: "<unk>ou should've done this", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["T", "h", "e", "|", "c", "o", "m", "p", "a", "n", "y", "|", "w", "a", "s", "|", "f", "o", "u", "n", "d", "e", "d", "|", "i", "n", "|", "2", "0", "1", "6", "."], ids: [3, 14, 11, 4, 9, 21, 19, 22, 7, 20, 31, 4, 29, 7, 25, 4, 12, 21, 27, 20, 10, 11, 10, 4, 15, 20, 4, 3, 3, 3, 3, 3], decoded: "<unk>he company was founded in <unk>", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\n", "'", "l", "l", "|", "!", "!", "t", "o", "?", "'", "d", "'", "'", "d", "|", "o", "f", ",", "|", "c", "a", "n", "'", "t", 
"."], ids: [3, 3, 5, 18, 18, 4, 3, 3, 26, 21, 3, 5, 10, 5, 5, 10, 4, 21, 12, 3, 4, 9, 7, 20, 5, 26, 3], decoded: "<unk>'l <unk>to<unk>'d'd of<unk> can't<unk>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["d", "e", "f", "|", "m", "a", "i", "n", "(", ")", ":", "\n", "\t", "p", "a", "s", "s"], ids: [10, 11, 12, 4, 19, 7, 15, 20, 3, 3, 3, 3, 3, 22, 7, 25, 25], decoded: "def main<unk>pas", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["l", "e", "t", "|", "a", "|", "=", "|", "o", "b", "j", ".", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";", "\n", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";"], ids: [18, 11, 26, 4, 7, 4, 3, 4, 21, 8, 16, 3, 26, 21, 3, 26, 24, 15, 20, 13, 3, 3, 3, 3, 26, 21, 3, 26, 24, 15, 20, 13, 3, 3, 3], decoded: "let a <unk> obj<unk>to<unk>tring<unk>to<unk>tring<unk>", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["T", "h", "i", "s", "\n", "\n", "i", "s", "\n", "a", "\n", "t", "e", "s", "t", "."], ids: [3, 14, 15, 25, 3, 3, 15, 25, 3, 7, 3, 26, 11, 25, 26, 3], decoded: "<unk>his<unk>is<unk>a<unk>test<unk>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["U", "N", "w", "a", "n", "t", "\u00e9", "d", ",", "r", "u", "n", "n", "i", "n", "g"], ids: [3, 3, 29, 7, 20, 26, 3, 10, 3, 24, 27, 20, 20, 15, 20, 13], decoded: "<unk>want<unk>d<unk>runing", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["H", "e", "l", "l", "o", "|", "W", "o", "r", "l", "d"], ids: [3, 11, 18, 18, 21, 4, 3, 21, 24, 18, 10], decoded: "<unk>elo <unk>orld", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["h", "e", "l", "l", "o", "|", "w", "o", "r", "l", "d"], ids: [14, 11, 18, 18, 21, 4, 29, 21, 24, 18, 10], decoded: "helo world", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["|", "|", "|", "l", "e", "a", "d", "i", "n", "g", "|", "s", "p", "a", "c", "e"], ids: [4, 4, 4, 18, 11, 7, 10, 15, 20, 13, 4, 25, 22, 7, 9, 11], 
decoded: "leading space", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["t", "r", "a", "i", "l", "i", "n", "g", "|", "s", "p", "a", "c", "e", "|", "|", "|"], ids: [26, 24, 7, 15, 18, 15, 20, 13, 4, 25, 22, 7, 9, 11, 4, 4, 4], decoded: "trailing space", }, SURROUNDING_SPACE: { text: BASE_TEST_STRINGS.SURROUNDING_SPACE, tokens: ["|", "|", "|", "s", "u", "r", "r", "o", "u", "n", "d", "i", "n", "g", "|", "s", "p", "a", "c", "e", "|", "|", "|"], ids: [4, 4, 4, 25, 27, 24, 24, 21, 27, 20, 10, 15, 20, 13, 4, 25, 22, 7, 9, 11, 4, 4, 4], decoded: "surounding space", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["H", "i", "|", "|", "H", "e", "l", "l", "o"], ids: [3, 15, 4, 4, 3, 11, 18, 18, 21], decoded: "<unk>i <unk>elo", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["t", "e", "s", "t", "|", "$", "1", "|", "R", "2", "|", "#", "3", "|", "\u20ac", "4", "|", "\u00a3", "5", "|", "\u00a5", "6", "|", "\u20a3", "7", "|", "\u20b9", "8", "|", "\u20b1", "9", "|", "t", "e", "s", "t"], ids: [26, 11, 25, 26, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 26, 11, 25, 26], decoded: "test <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> test", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "|", "b", "o", "u", "g", "h", "t", "|", "a", "n", "|", "a", "p", "p", "l", "e", "|", "f", "o", "r", "|", "$", "1", ".", "0", "0", "|", "a", "t", "|", "t", "h", "e", "|", "s", "t", "o", "r", "e", "."], ids: [3, 4, 8, 21, 27, 13, 14, 26, 4, 7, 20, 4, 7, 22, 22, 18, 11, 4, 12, 21, 24, 4, 3, 3, 3, 3, 3, 4, 7, 26, 4, 26, 14, 11, 4, 25, 26, 21, 24, 11, 3], decoded: "<unk> bought an aple for <unk> at the store<unk>", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["y", "o", "u", "\u2026", "|", "|"], ids: [31, 21, 27, 3, 4, 4], decoded: "you<unk>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["y", "o", "u", 
"\u2026", "\u00a0", "\u00a0"], ids: [31, 21, 27, 3, 3, 3], decoded: "you<unk>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["y", "o", "u", "\u2026", "\u00a0", "\u00a0", "y", "o", "u", "\u2026", "\u00a0", "\u00a0"], ids: [31, 21, 27, 3, 3, 3, 31, 21, 27, 3, 3, 3], decoded: "you<unk>you<unk>", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["w", "e", "i", "r", "d", "|", "\uff5e", "|", "e", "d", "g", "e", "|", "\uff5e", "|", "c", "a", "s", "e"], ids: [29, 11, 15, 24, 10, 4, 3, 4, 11, 10, 13, 11, 4, 3, 4, 9, 7, 25, 11], decoded: "weird <unk> edge <unk> case", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u2581", "T", "h", "i", "s", "|", "\u2581", "i", "s", "|", "\u2581", "a", "|", "\u2581", "t", "e", "s", "t", "|", "\u2581", "."], ids: [3, 3, 14, 15, 25, 4, 3, 15, 25, 4, 3, 7, 4, 3, 26, 11, 25, 26, 4, 3, 3], decoded: "<unk>his <unk>is <unk>a <unk>test <unk>", }, CHINESE_LATIN_MIXED: { text: BERT_TEST_STRINGS.CHINESE_LATIN_MIXED, tokens: ["a", "h", "\u535a", "\u63a8", "z", "z"], ids: [7, 14, 3, 3, 32, 32], decoded: "ah<unk>z", }, SIMPLE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.SIMPLE_WITH_ACCENTS, tokens: ["H", "\u00e9", "l", "l", "o"], ids: [3, 3, 18, 18, 21], decoded: "<unk>lo", }, MIXED_CASE_WITHOUT_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITHOUT_ACCENTS, tokens: ["|", "\t", "H", "e", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 3, 11, 3, 3, 21, 3, 14, 21, 29, 4, 4, 3, 4, 3, 24, 11, 4, 31, 21, 3, 3, 4, 4], decoded: "<unk>e<unk>o<unk>how <unk> <unk>re yo<unk>", }, MIXED_CASE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITH_ACCENTS, tokens: ["|", "\t", "H", "\u00e4", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 3, 3, 3, 3, 21, 3, 14, 21, 29, 4, 4, 3, 4, 3, 24, 11, 4, 31, 21, 3, 3, 4, 4], 
decoded: "<unk>o<unk>how <unk> <unk>re yo<unk>", }, }, "Xenova/mms-1b-all": { NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "|", "0", "|", "1", "|", "2", "|", "3", "|", "4", "|", "5", "|", "6", "|", "7", "|", "8", "|", "9", "|", "1", "0", "|", "1", "0", "0", "|", "1", "0", "0", "0"], ids: [27, 30, 35, 41, 39, 38, 40, 43, 42, 36, 4, 27, 4, 30, 4, 35, 4, 41, 4, 39, 4, 38, 4, 40, 4, 43, 4, 42, 4, 36, 4, 30, 27, 4, 30, 27, 27, 4, 30, 27, 27, 27], decoded: "0123456789 0 1 2 3 4 5 6 7 8 9 10 10 10", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["T", "h", "e", "|", "c", "o", "m", "p", "a", "n", "y", "|", "w", "a", "s", "|", "f", "o", "u", "n", "d", "e", "d", "|", "i", "n", "|", "2", "0", "1", "6", "."], ids: [3, 13, 5, 4, 16, 8, 18, 20, 7, 10, 22, 4, 23, 7, 11, 4, 19, 8, 17, 10, 15, 5, 15, 4, 9, 10, 4, 35, 27, 30, 40, 37], decoded: "<unk>he company was founded in 2016.", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\n", "'", "l", "l", "|", "!", "!", "t", "o", "?", "'", "d", "'", "'", "d", "|", "o", "f", ",", "|", "c", "a", "n", "'", "t", "."], ids: [3, 3, 31, 14, 14, 4, 75, 75, 6, 8, 3, 31, 15, 31, 31, 15, 4, 8, 19, 44, 4, 16, 7, 10, 31, 6, 37], decoded: "<unk>'l!to<unk>'d'd of, can't.", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["d", "e", "f", "|", "m", "a", "i", "n", "(", ")", ":", "\n", "\t", "p", "a", "s", "s"], ids: [15, 5, 19, 4, 18, 7, 9, 10, 3, 3, 46, 3, 3, 20, 7, 11, 11], decoded: "def main<unk>:<unk>pas", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["l", "e", "t", "|", "a", "|", "=", "|", "o", "b", "j", ".", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";", "\n", "t", "o", "S", "t", "r", "i", "n", "g", "(", ")", ";"], ids: [14, 5, 6, 4, 7, 4, 3, 4, 8, 24, 29, 37, 6, 8, 3, 6, 12, 9, 10, 21, 3, 3, 52, 3, 6, 8, 3, 6, 12, 9, 10, 21, 3, 3, 52], decoded: "let a <unk> 
obj.to<unk>tring<unk>;<unk>to<unk>tring<unk>;", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["T", "h", "i", "s", "\n", "\n", "i", "s", "\n", "a", "\n", "t", "e", "s", "t", "."], ids: [3, 13, 9, 11, 3, 3, 9, 11, 3, 7, 3, 6, 5, 11, 6, 37], decoded: "<unk>his<unk>is<unk>a<unk>test.", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["U", "N", "w", "a", "n", "t", "\u00e9", "d", ",", "r", "u", "n", "n", "i", "n", "g"], ids: [3, 3, 23, 7, 10, 6, 55, 15, 44, 12, 17, 10, 10, 9, 10, 21], decoded: "<unk>want\u00e9d,runing", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0000", "2", "\ufffd", "3"], ids: [30, 3, 35, 3, 41], decoded: "1<unk>2<unk>3", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u751f", "\u6d3b", "\u7684", "\u771f", "\u8c1b", "\u662f"], ids: [136, 3, 3, 3, 3, 3], decoded: "\u751f<unk>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["t", "e", "s", "t", "|", "$", "1", "|", "R", "2", "|", "#", "3", "|", "\u20ac", "4", "|", "\u00a3", "5", "|", "\u00a5", "6", "|", "\u20a3", "7", "|", "\u20b9", "8", "|", "\u20b1", "9", "|", "t", "e", "s", "t"], ids: [6, 5, 11, 6, 4, 48, 30, 4, 3, 35, 4, 3, 41, 4, 3, 39, 4, 68, 38, 4, 53, 40, 4, 3, 43, 4, 3, 42, 4, 3, 36, 4, 6, 5, 11, 6], decoded: "test $1 <unk>2 <unk>3 <unk>4 \u00a35 \u00a56 <unk>7 <unk>8 <unk>9 test", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "|", "b", "o", "u", "g", "h", "t", "|", "a", "n", "|", "a", "p", "p", "l", "e", "|", "f", "o", "r", "|", "$", "1", ".", "0", "0", "|", "a", "t", "|", "t", "h", "e", "|", "s", "t", "o", "r", "e", "."], ids: [3, 4, 24, 8, 17, 21, 13, 6, 4, 7, 10, 4, 7, 20, 20, 14, 5, 4, 19, 8, 12, 4, 48, 30, 37, 27, 27, 4, 7, 6, 4, 6, 13, 5, 4, 11, 6, 8, 12, 5, 37], decoded: "<unk> bought an aple for $1.0 at the store.", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u2581", "T", "h", "i", "s", "|", "\u2581", "i", "s", "|", "\u2581", "a", 
"|", "\u2581", "t", "e", "s", "t", "|", "\u2581", "."], ids: [3, 3, 13, 9, 11, 4, 3, 9, 11, 4, 3, 7, 4, 3, 6, 5, 11, 6, 4, 3, 37], decoded: "<unk>his <unk>is <unk>a <unk>test <unk>.", }, SIMPLE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.SIMPLE_WITH_ACCENTS, tokens: ["H", "\u00e9", "l", "l", "o"], ids: [3, 55, 14, 14, 8], decoded: "<unk>\u00e9lo", }, MIXED_CASE_WITHOUT_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITHOUT_ACCENTS, tokens: ["|", "\t", "H", "e", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 3, 5, 3, 3, 8, 75, 13, 8, 23, 4, 4, 3, 4, 3, 12, 5, 4, 22, 8, 3, 3, 4, 4], decoded: "<unk>e<unk>o!how <unk> <unk>re yo<unk>", }, MIXED_CASE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITH_ACCENTS, tokens: ["|", "\t", "H", "\u00e4", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 3, 78, 3, 3, 8, 75, 13, 8, 23, 4, 4, 3, 4, 3, 12, 5, 4, 22, 8, 3, 3, 4, 4], decoded: "<unk>\u00e4<unk>o!how <unk> <unk>re yo<unk>", }, }, "Xenova/mms-1b-fl102": { MIXED_CASE_WITH_ACCENTS: { text: BERT_TEST_STRINGS.MIXED_CASE_WITH_ACCENTS, tokens: ["|", "\t", "H", "\u00e4", "L", "L", "o", "!", "h", "o", "w", "|", "|", "\n", "|", "A", "r", "e", "|", "y", "o", "U", "?", "|", "|"], ids: [4, 3, 3, 3, 3, 3, 8, 75, 13, 8, 23, 4, 4, 3, 4, 3, 12, 5, 4, 22, 8, 3, 3, 4, 4], decoded: "<unk>o!how <unk> <unk>re yo<unk>", }, }, "Xenova/mms-1b-l1107": { NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "|", "0", "|", "1", "|", "2", "|", "3", "|", "4", "|", "5", "|", "6", "|", "7", "|", "8", "|", "9", "|", "1", "0", "|", "1", "0", "0", "|", "1", "0", "0", "0"], ids: [34, 36, 37, 42, 38, 41, 39, 3, 3, 3, 4, 34, 4, 36, 4, 37, 4, 42, 4, 38, 4, 41, 4, 39, 4, 3, 4, 3, 4, 3, 4, 36, 34, 4, 36, 34, 34, 4, 36, 34, 34, 34], decoded: "0123456<unk> 0 1 2 3 4 5 6 <unk> <unk> <unk> 10 10 10", }, TEXT_WITH_NUMBERS: { text: 
BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["T", "h", "e", "|", "c", "o", "m", "p", "a", "n", "y", "|", "w", "a", "s", "|", "f", "o", "u", "n", "d", "e", "d", "|", "i", "n", "|", "2", "0", "1", "6", "."], ids: [3, 9, 5, 4, 21, 7, 18, 24, 8, 10, 20, 4, 17, 8, 12, 4, 19, 7, 16, 10, 14, 5, 14, 4, 11, 10, 4, 37, 34, 36, 39, 3], decoded: "<unk>he company was founded in 2016<unk>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["t", "e", "s", "t", "|", "$", "1", "|", "R", "2", "|", "#", "3", "|", "\u20ac", "4", "|", "\u00a3", "5", "|", "\u00a5", "6", "|", "\u20a3", "7", "|", "\u20b9", "8", "|", "\u20b1", "9", "|", "t", "e", "s", "t"], ids: [6, 5, 12, 6, 4, 3, 36, 4, 3, 37, 4, 3, 42, 4, 3, 38, 4, 3, 41, 4, 3, 39, 4, 3, 3, 4, 3, 3, 4, 3, 3, 4, 6, 5, 12, 6], decoded: "test <unk>1 <unk>2 <unk>3 <unk>4 <unk>5 <unk>6 <unk> <unk> <unk> test", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "|", "b", "o", "u", "g", "h", "t", "|", "a", "n", "|", "a", "p", "p", "l", "e", "|", "f", "o", "r", "|", "$", "1", ".", "0", "0", "|", "a", "t", "|", "t", "h", "e", "|", "s", "t", "o", "r", "e", "."], ids: [3, 4, 23, 7, 16, 22, 9, 6, 4, 8, 10, 4, 8, 24, 24, 15, 5, 4, 19, 7, 13, 4, 3, 36, 3, 34, 34, 4, 8, 6, 4, 6, 9, 5, 4, 12, 6, 7, 13, 5, 3], decoded: "<unk> bought an aple for <unk>1<unk>0 at the store<unk>", }, }, };
transformers.js/tests/models/wav2vec2/test_tokenization_wav2vec2.js/0
{ "file_path": "transformers.js/tests/models/wav2vec2/test_tokenization_wav2vec2.js", "repo_id": "transformers.js", "token_count": 15864 }
372
import { pipeline, ImageFeatureExtractionPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; import { load_cached_image } from "../asset_cache.js"; const PIPELINE_ID = "image-feature-extraction"; export default () => { describe("Image Feature Extraction", () => { describe("Default", () => { const model_id = "hf-internal-testing/tiny-random-ViTMAEModel"; /** @type {ImageFeatureExtractionPipeline} */ let pipe; let images; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]); }, MAX_MODEL_LOAD_TIME); it("should be an instance of ImageFeatureExtractionPipeline", () => { expect(pipe).toBeInstanceOf(ImageFeatureExtractionPipeline); }); describe("batch_size=1", () => { it( "default", async () => { const output = await pipe(images[0]); expect(output.dims).toEqual([1, 91, 32]); expect(output.mean().item()).toBeCloseTo(-8.507473614471905e-10, 6); }, MAX_TEST_EXECUTION_TIME, ); }); describe("batch_size>1", () => { it( "default", async () => { const output = await pipe(images); expect(output.dims).toEqual([images.length, 91, 32]); expect(output.mean().item()).toBeCloseTo(-5.997602414709036e-10, 6); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("CLIP-like", () => { const model_id = "hf-internal-testing/tiny-random-CLIPModel"; /** @type {ImageFeatureExtractionPipeline} */ let pipe; let images; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]); }, MAX_MODEL_LOAD_TIME); it("should be an instance of ImageFeatureExtractionPipeline", () => { expect(pipe).toBeInstanceOf(ImageFeatureExtractionPipeline); }); 
describe("batch_size=1", () => { it( "default", async () => { const output = await pipe(images[0]); expect(output.dims).toEqual([1, 64]); expect(output.mean().item()).toBeCloseTo(-0.11340035498142242, 6); }, MAX_TEST_EXECUTION_TIME, ); }); describe("batch_size>1", () => { it( "default", async () => { const output = await pipe(images); expect(output.dims).toEqual([images.length, 64]); expect(output.mean().item()).toBeCloseTo(-0.11006651818752289, 6); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); };
transformers.js/tests/pipelines/test_pipelines_image_feature_extraction.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_image_feature_extraction.js", "repo_id": "transformers.js", "token_count": 1475 }
373
// Unit tests for the "zero-shot-object-detection" pipeline, covering both
// post-processing paths: OWL-ViT style (`post_process_object_detection`) and
// Grounding DINO style (`post_process_grounded_object_detection`).
import { pipeline, ZeroShotObjectDetectionPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
import { load_cached_image } from "../asset_cache.js";

const PIPELINE_ID = "zero-shot-object-detection";

export default () => {
  describe("Zero-shot Object Detection", () => {
    describe("w/ post_process_object_detection", () => {
      const model_id = "hf-internal-testing/tiny-random-OwlViTForObjectDetection";
      const candidate_labels = ["hello", "hello world"];

      /** @type {ZeroShotObjectDetectionPipeline} */
      let pipe;
      let images;

      // Load the pipeline and the two cached test images once for this suite.
      beforeAll(async () => {
        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
        images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
      }, MAX_MODEL_LOAD_TIME);

      // Expected top detections per input image. Only the first few entries are
      // pinned here; the tests slice the pipeline output to this length before
      // comparing, since the full output contains many more boxes.
      const targets = {
        white_image: [
          {
            score: 0.6028420329093933,
            label: "hello",
            box: { xmin: 47, ymin: 117, xmax: 62, ymax: 134 },
          },
          {
            score: 0.6026064157485962,
            label: "hello world",
            box: { xmin: 47, ymin: 117, xmax: 62, ymax: 134 },
          },
          {
            score: 0.5987668037414551,
            label: "hello world",
            box: { xmin: 145, ymin: 47, xmax: 160, ymax: 63 },
          },
          {
            score: 0.5986272692680359,
            label: "hello",
            box: { xmin: 89, ymin: 131, xmax: 104, ymax: 148 },
          },
          {
            score: 0.5985949039459229,
            label: "hello world",
            box: { xmin: 89, ymin: 131, xmax: 104, ymax: 148 },
          },
          // ... many more
        ],
        blue_image: [
          {
            score: 0.6622366309165955,
            label: "hello",
            box: { xmin: 48, ymin: 45, xmax: 62, ymax: 61 },
          },
          {
            score: 0.6562080383300781,
            label: "hello world",
            box: { xmin: 48, ymin: 45, xmax: 62, ymax: 61 },
          },
          {
            score: 0.6493991613388062,
            label: "hello world",
            box: { xmin: 34, ymin: 58, xmax: 48, ymax: 74 },
          },
          {
            score: 0.6476974487304688,
            label: "hello",
            box: { xmin: 34, ymin: 58, xmax: 48, ymax: 74 },
          },
          {
            score: 0.6391685009002686,
            label: "hello",
            box: { xmin: 103, ymin: 59, xmax: 117, ymax: 75 },
          },
          // ... many more
        ],
      };

      it("should be an instance of ZeroShotObjectDetectionPipeline", () => {
        expect(pipe).toBeInstanceOf(ZeroShotObjectDetectionPipeline);
      });

      describe("batch_size=1", () => {
        it(
          "default",
          async () => {
            const output = await pipe(images[0], candidate_labels);
            // Without a threshold, the OWL-ViT head emits one detection per
            // query box (512 for this tiny model).
            expect(output).toHaveLength(512);
            expect(output.slice(0, targets.white_image.length)).toBeCloseToNested(targets.white_image, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
        it(
          "custom (w/ top_k & threshold)",
          async () => {
            const top_k = 3;
            const output = await pipe(images[0], candidate_labels, { top_k, threshold: 0.05 });
            expect(output).toBeCloseToNested(targets.white_image.slice(0, top_k), 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
      });

      describe("batch_size>1", () => {
        it(
          "default",
          async () => {
            const output = await pipe(images, candidate_labels);
            // One result list per input image, in the same order as `targets`.
            const target = Object.values(targets);
            expect(output.map((x, i) => x.slice(0, target[i].length))).toBeCloseToNested(target, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
        it(
          "custom (w/ top_k & threshold)",
          async () => {
            const top_k = 3;
            const output = await pipe(images, candidate_labels, { top_k, threshold: 0.05 });
            const target = Object.values(targets).map((x) => x.slice(0, top_k));
            expect(output).toBeCloseToNested(target, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
      });

      // Free model resources so suites don't accumulate memory.
      afterAll(async () => {
        await pipe.dispose();
      }, MAX_MODEL_DISPOSE_TIME);
    });

    describe("w/ post_process_grounded_object_detection", () => {
      const model_id = "hf-internal-testing/tiny-random-GroundingDinoForObjectDetection";
      const candidate_labels = ["a cat."];

      /** @type {ZeroShotObjectDetectionPipeline} */
      let pipe;
      let image;

      beforeAll(async () => {
        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
        image = await load_cached_image("white_image");
      }, MAX_MODEL_LOAD_TIME);

      // Expected first detections. The "[SEP]" token leaks into the label for
      // this tiny random model; scores saturate at 1 and boxes degenerate.
      const target = [
        { box: { xmax: 112, xmin: -111, ymax: 0, ymin: 0 }, label: "a cat. [SEP]", score: 1 },
        { box: { xmax: 112, xmin: -111, ymax: 0, ymin: 0 }, label: "a cat. [SEP]", score: 1 },
        { box: { xmax: 112, xmin: -111, ymax: 0, ymin: 0 }, label: "a cat. [SEP]", score: 1 },
        // ... many more
      ];

      it("should be an instance of ZeroShotObjectDetectionPipeline", () => {
        expect(pipe).toBeInstanceOf(ZeroShotObjectDetectionPipeline);
      });

      describe("batch_size=1", () => {
        it(
          "default",
          async () => {
            const output = await pipe(image, candidate_labels);
            // Grounding DINO emits one detection per query (900 for this model).
            expect(output).toHaveLength(900);
            expect(output.slice(0, target.length)).toBeCloseToNested(target, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
        it(
          "custom (w/ top_k & threshold)",
          async () => {
            const top_k = 3;
            const output = await pipe(image, candidate_labels, { top_k, threshold: 0.05 });
            expect(output).toBeCloseToNested(target.slice(0, top_k), 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
      });

      afterAll(async () => {
        await pipe.dispose();
      }, MAX_MODEL_DISPOSE_TIME);
    });
  });
};
transformers.js/tests/pipelines/test_pipelines_zero_shot_object_detection.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_zero_shot_object_detection.js", "repo_id": "transformers.js", "token_count": 3154 }
374
# CircleCI "setup" pipeline for huggingface/transformers.
# It runs quality/consistency checks, computes the set of tests to run, then
# hands off to a dynamically generated config via the continuation orb.
version: 2.1
setup: true
orbs:
    continuation: circleci/continuation@0.1.0
parameters:
    nightly:
        type: boolean
        default: false
    # GHA_* parameters are populated when the pipeline is triggered from a
    # GitHub Actions workflow; they default to empty for regular triggers.
    GHA_Actor:
        type: string
        default: ""
    GHA_Action:
        type: string
        default: ""
    GHA_Event:
        type: string
        default: ""
    GHA_Meta:
        type: string
        default: ""

jobs:
    # Ensure running with CircleCI/huggingface
    check_circleci_user:
        docker:
            - image: python:3.10-slim
        resource_class: small
        parallelism: 1
        steps:
            - run: echo $CIRCLE_PROJECT_USERNAME
            - run: |
                if [ "$CIRCLE_PROJECT_USERNAME" = "huggingface" ]; then
                    exit 0
                else
                    echo "The CI is running under $CIRCLE_PROJECT_USERNAME personal account. Please follow https://support.circleci.com/hc/en-us/articles/360008097173-Troubleshooting-why-pull-requests-are-not-triggering-jobs-on-my-organization- to fix it."; exit -1
                fi
    # Fetch the tests to run
    fetch_tests:
        working_directory: ~/transformers
        docker:
            - image: huggingface/transformers-quality
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -U -e .
            - run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
            - run: mkdir -p test_preparation
            - run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
            - run: python utils/tests_fetcher.py --filter_tests
            - run: export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)" && echo $GIT_COMMIT_MESSAGE && python .circleci/create_circleci_config.py --fetcher_folder test_preparation
            # Halt the job (successfully) when the fetcher produced no config.
            - run: |
                if [ ! -s test_preparation/generated_config.yml ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - store_artifacts:
                path: test_preparation
            - run:
                name: "Retrieve Artifact Paths"
                # [reference] https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts
                # `CIRCLE_TOKEN` is defined as an environment variables set within a context, see `https://circleci.com/docs/contexts/`
                command: |
                    project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}"
                    job_number=${CIRCLE_BUILD_NUM}
                    url="https://circleci.com/api/v2/project/${project_slug}/${job_number}/artifacts"
                    curl -o test_preparation/artifacts.json ${url} --header "Circle-Token: $CIRCLE_TOKEN"
            - run:
                name: "Prepare pipeline parameters"
                command: |
                    python utils/process_test_artifacts.py
            # To avoid too long generated_config.yaml on the continuation orb, we pass the links to the artifacts as parameters.
            # Otherwise the list of tests was just too big. Explicit is good but for that it was a limitation.
            # We used:
            # https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts : to get the job artifacts
            # We could not pass a nested dict, which is why we create the test_file_... parameters for every single job
            - store_artifacts:
                path: test_preparation/transformed_artifacts.json
            - store_artifacts:
                path: test_preparation/artifacts.json
            - continuation/continue:
                parameters: test_preparation/transformed_artifacts.json
                configuration_path: test_preparation/generated_config.yml

    # To run all tests for the nightly build
    fetch_all_tests:
        working_directory: ~/transformers
        docker:
            - image: huggingface/transformers-quality
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -U -e .
            - run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
            - run: mkdir -p test_preparation
            - run: python utils/tests_fetcher.py --fetch_all | tee tests_fetched_summary.txt
            - run: python utils/tests_fetcher.py --filter_tests
            - run: export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)" && echo $GIT_COMMIT_MESSAGE && python .circleci/create_circleci_config.py --fetcher_folder test_preparation
            - run: |
                if [ ! -s test_preparation/generated_config.yml ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi
            - store_artifacts:
                path: test_preparation
            - run:
                name: "Retrieve Artifact Paths"
                # NOTE(review): unlike `fetch_tests`, this curl sends no
                # `Circle-Token` header — confirm whether nightly jobs can read
                # artifacts anonymously or whether the header was dropped by mistake.
                command: |
                    project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}"
                    job_number=${CIRCLE_BUILD_NUM}
                    url="https://circleci.com/api/v2/project/${project_slug}/${job_number}/artifacts"
                    curl -o test_preparation/artifacts.json ${url}
            - run:
                name: "Prepare pipeline parameters"
                command: |
                    python utils/process_test_artifacts.py
            # To avoid too long generated_config.yaml on the continuation orb, we pass the links to the artifacts as parameters.
            # Otherwise the list of tests was just too big. Explicit is good but for that it was a limitation.
            # We used:
            # https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts : to get the job artifacts
            # We could not pass a nested dict, which is why we create the test_file_... parameters for every single job
            - store_artifacts:
                path: test_preparation/transformed_artifacts.json
            - store_artifacts:
                path: test_preparation/artifacts.json
            - continuation/continue:
                parameters: test_preparation/transformed_artifacts.json
                configuration_path: test_preparation/generated_config.yml

    # Linting / formatting / docstring checks on the source tree.
    check_code_quality:
        working_directory: ~/transformers
        docker:
            - image: huggingface/transformers-quality
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -e ".[quality]"
            - run:
                name: Show installed libraries and their versions
                command: pip freeze | tee installed.txt
            - store_artifacts:
                path: ~/transformers/installed.txt
            - run: python -c "from transformers import *" || (echo '๐Ÿšจ import failed, this means you introduced unprotected imports! ๐Ÿšจ'; exit 1)
            - run: ruff check examples tests src utils
            - run: ruff format examples tests src utils --check
            - run: python utils/custom_init_isort.py --check_only
            - run: python utils/sort_auto_mappings.py --check_only
            - run: python utils/check_doc_toc.py
            - run: python utils/check_docstrings.py --check_all

    # Repo-wide invariants: copies, dummies, inits, metadata, etc.
    check_repository_consistency:
        working_directory: ~/transformers
        docker:
            - image: huggingface/transformers-consistency
        resource_class: large
        environment:
            TRANSFORMERS_IS_CI: yes
            PYTEST_TIMEOUT: 120
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -e ".[quality]"
            - run:
                name: Show installed libraries and their versions
                command: pip freeze | tee installed.txt
            - store_artifacts:
                path: ~/transformers/installed.txt
            - run: python utils/check_copies.py
            - run: python utils/check_modular_conversion.py
            - run: python utils/check_dummies.py
            - run: python utils/check_repo.py
            - run: python utils/check_inits.py
            - run: python utils/check_pipeline_typing.py
            - run: python utils/check_config_docstrings.py
            - run: python utils/check_config_attributes.py
            - run: python utils/check_doctest_list.py
            - run: make deps_table_check_updated
            - run: python utils/update_metadata.py --check-only
            - run: python utils/check_docstrings.py

workflows:
    version: 2
    # Runs on pushes/PRs against the canonical repo (non-nightly).
    setup_and_quality:
        when:
            and:
                - equal: [<<pipeline.project.git_url>>, https://github.com/huggingface/transformers]
                - not: <<pipeline.parameters.nightly>>
        jobs:
            - check_circleci_user
            - check_code_quality
            - check_repository_consistency
            - fetch_tests

    # Same checks for forks; `fetch_tests` needs the context for CIRCLE_TOKEN.
    setup_and_quality_2:
        when:
            not:
                equal: [<<pipeline.project.git_url>>, https://github.com/huggingface/transformers]
        jobs:
            - check_circleci_user
            - check_code_quality
            - check_repository_consistency
            - fetch_tests:
                # [reference] https://circleci.com/docs/contexts/
                context:
                    - TRANSFORMERS_CONTEXT

    nightly:
        when: <<pipeline.parameters.nightly>>
        jobs:
            - check_circleci_user
            - check_code_quality
            - check_repository_consistency
            - fetch_all_tests
transformers/.circleci/config.yml/0
{ "file_path": "transformers/.circleci/config.yml", "repo_id": "transformers", "token_count": 4669 }
375
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How To Request Support This is an Open Source Project so please be mindful that like in any other project of this kind there is no obligation to answer all requests for help. However, we want to encourage you to ask for help whenever you think it's needed! We are happy about every question we get because it allows us to better understand your needs, possible misunderstandings, and most importantly a way for you to help us make this library better. That being said, this document's main purpose is to provide guidelines at how you can formulate your requests to increase your chances to be understood and to get support. There are two main venues to receive support: [the forums](https://discuss.huggingface.co/) and [the GitHub issues](https://github.com/huggingface/transformers/issues). ## The Forums [The user forums](https://discuss.huggingface.co/) are supported by the wide community of the library users and backed up by developers when needed. If you have a difficulty with deploying this library or some questions, or you'd like to discuss a new feature, please first consider discussing those things at the forums. Only when you feel your subject matter has been crystallized and you still need support from the library developers do proceed to file an [issue](https://github.com/huggingface/transformers/issues). 
In particular all "Please explain" questions or objectively very user-specific feature requests belong to the forums. Here are some example of such questions: * "I would like to use a BertModel within a RL-Agent for a customer support service. How can I use a BertForMaskedLM in my ChatBotModel?" * "Could you please explain why T5 has no positional embedding matrix under T5Model?" * "How should I set my generation parameters for translation?" * "How to train T5 on De->En translation?" ## The GitHub Issues Everything which hints at a bug should be opened as an [issue](https://github.com/huggingface/transformers/issues). You are not required to read the following guidelines before opening an issue. However, if you notice that your issue doesn't get any replies, chances are that the developers have one or several difficulties with its quality. In this case, reading the following points and adjusting your issue accordingly could help. 1. Before posting an issue, first search for already posted issues, since chances are someone has already asked a similar question before you. If you use Google your search query should be: ``` "huggingface" "transformers" your query ``` The first two quoted words tell Google to limit the search to the context of the Huggingface Transformers. The remainder is your query - most commonly this would be the error message the software fails with. We will go deeper into details shortly. The results of such a query will typically match GitHub issues, Hugging Face forums, StackExchange, and blogs. If you find relevant hints, you may choose to continue the discussion there if you have follow up questions. If what you found is similar but doesn't quite answer your problem, please, post a new issue and do include links to similar issues or forum discussions you may have found. Let's look at some examples: The error message, often referred to as an assertion, tells us what went wrong. 
Here is an example of an assertion: ```python Traceback (most recent call last): File "<string>", line 1, in <module> File "/transformers/src/transformers/__init__.py", line 34, in <module> from . import dependency_versions_check File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module> from .utils import is_tokenizers_available File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module> from tqdm.auto import tqdm ModuleNotFoundError: No module named 'tqdm.auto' ``` and it typically includes a traceback, so that we can see the full stack of calls the program made before it fails. This gives us the context to know why the program failed. Going back to the above example. If you received this error search, look at the very last line of the error which is: ```python ModuleNotFoundError: No module named 'tqdm.auto' ``` And now we can use it to do the searching on your favorite search engine: 1. first for `"huggingface" "transformers" "ModuleNotFoundError: No module named 'tqdm.auto'"` 2. if you don't find relevant results, then search for just `"ModuleNotFoundError: No module named 'tqdm.auto'"` 3. and finally if nothing still comes up, then remove the outside quotes: `ModuleNotFoundError: No module named 'tqdm.auto'` If the error includes any messages that include bits unique to your filesystem, always remove those in the search query since other users will not have the same filesystem as yours. For example: ```bash python -c 'open("/tmp/wrong_path.txt", "r")' Traceback (most recent call last): File "<string>", line 1, in <module> FileNotFoundError: [Errno 2] No such file or directory: '/tmp/wrong_path.txt' ``` Here you'd search for just: `"FileNotFoundError: [Errno 2] No such file or directory"` If the local information that you removed were inside the error message and you removed them you may need to remove double quotes since your query is no longer exact. 
So if the error message was something like: ```bash ValueError: '/tmp/wrong_path.txt' cannot be found ``` then you'd search for `"ValueError" "cannot be found"` As you search you will notice that when you don't use quotes often the search engines will return a variety of unrelated hits, which may or may not be what you want. Experiment with different ways and find which approach gives the most satisfactory results. 2. Keep the issue short, providing the information that you think will aid the developers to understand your situation. Put yourself in the shoes of the person who has never seen your code or knows anything about your custom setup. This mental exercise will help to develop an intuition to what/what not to share" 3. If there is a software failure, always provide the full traceback, for example: ```python $ python -c 'import transformers' Traceback (most recent call last): File "<string>", line 1, in <module> File "/transformers/src/transformers/__init__.py", line 34, in <module> from . import dependency_versions_check File "/transformers/src/transformers/dependency_versions_check.py", line 34, in <module> from .utils import is_tokenizers_available File "/transformers/src/transformers/utils/import_utils.py", line 40, in <module> from tqdm.auto import tqdm ModuleNotFoundError: No module named 'tqdm.auto' ``` As compared to providing just the last line of the error message, e.g.: ```python ModuleNotFoundError: No module named 'tqdm.auto' ``` which is not sufficient. If your application is running on more than one GPU (e.g. under `DistributedDataParallel`) and typically getting every log and traceback printed multiple times, please make sure that you paste only one copy of it. At times the traceback from parallel processes may get interleaved - so either disentangle these or change the loggers to log only for `local_rank==0` so that only one process logs things. 4. 
When quoting a traceback, command line instructions and any type of code always enclose it in triple backticks inside the editor window, that is: ```` ``` git clone https://github.com/huggingface/transformers cd transformers pip install . ``` ```` If it's a command line with a long argument list, please consider breaking it down using backslashes and new lines. Here is an example of a good command line quote: ```bash cd examples/seq2seq torchrun --nproc_per_node=2 ./finetune_trainer.py \ --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \ --output_dir output_dir --overwrite_output_dir \ --do_train --n_train 500 --num_train_epochs 1 \ --per_device_train_batch_size 1 --freeze_embeds \ --src_lang en_XX --tgt_lang ro_RO --task translation \ --fp16 ``` If you don't break it up, one has to scroll horizontally which often makes it quite difficult to quickly see what's happening. The backslashes allow us to copy the command directly into the console to run it, without needing to edit it. 5. Include only the important information that you think will help the developer to quickly identify the problem. For example applications often create huge amounts of logs. Ask yourself whether providing all or parts of the log is useful. Pasting a 100-1000 lines of log into the issue is an immediate turn off, since it will take a lot of time to figure out where the pertinent parts of the log are. Attaching a full log can be helpful if it's done as an attachment, if it's enclosed in the following html code in the comment editor window: ``` <details> <summary>Full log</summary> <pre> many lines go here </pre> </details> ``` which would result in the following entry, which can be opened if desired, but otherwise takes little space. 
<details> <summary>Full log</summary> <pre> many lines go here </pre> </details> You could also provide a link to a pastebin service, but this is less beneficial since those links tend to expire quickly and future readers of your issue might not be able to access that log file anymore and may lack some context. 6. If this is an issue in your code, do try to reduce that code to a minimal example that still demonstrates the problem. Please ask at the forums if you have a hard time figuring how to do that. Please realize that we don't have the luxury of having time to try and understand all of your custom code. If you really tried to make a short reproducible code but couldn't figure it out, it might be that having a traceback will give the developer enough information to know what's going on. But if it is not enough and we can't reproduce the problem, we can't really solve it. Do not despair if you can't figure it out from the beginning, just share what you can and perhaps someone else will be able to help you at the forums. If your setup involves any custom datasets, the best way to help us reproduce the problem is to create a [Google Colab notebook](https://colab.research.google.com/) that demonstrates the issue and once you verify that the issue still exists, include a link to that notebook in the Issue. Just make sure that you don't copy and paste the location bar url of the open notebook - as this is private and we won't be able to open it. Instead, you need to click on `Share` in the right upper corner of the notebook, select `Get Link` and then copy and paste the public link it will give to you. 7. If you forked off some of this project's code or example applications, please, do not ask us to go into your code repository and figure out what you may have done. The code is already very complex and unless there is an easy way to do a diff and it's a small diff, it won't be possible to find someone with time on their hands to make a lengthy investigation. 
Albeit, you might find someone at the forums who will be generous to do this for you.

8. Before reporting an issue, first, always try to update your environment to the latest official version of this library. We have no resources to go and debug older revisions, which could easily have bugs that have been fixed in the latest released version.

   We understand that this is not always possible, especially when APIs change, in which case file an issue against the highest library version your environment can support. Of course, if you upgrade the library, always retest that the problem is still there.

9. Please do not ask us to reproduce an issue with your custom data, since we don't have it. So, either you should use some existing dataset supported by HF datasets or you need to supply code that generates a small sample on the fly, or some other quick and simple way to get it.

   Please do not send us any non-public domain data that may require a license or permission to be used.

10. Do not tag multiple developers on the issue unless you know this is expected, either because you asked them and they gave you an explicit permission to tag them or the issue template instructs you to do so.

    The "who to tag for what domain" part of the issue template is there to help users direct their questions to the right developers who are designated maintainers of project's specific domains. They can then decide at their own discretion to tag other developers if they feel it'd help move the issue forward.

    We currently don't have a triage service and we trust your capacity to identify the right domain and thus the persons to tag in your issue. If you are not sure, please use the forums to ask for guidance.

    When in doubt, err on the side of not tagging a given person. If you tag multiple people without their context or permission, don't be surprised if you get no response at all.
Please remember that every time you tag someone, they get a notification and you're taking their time without their permission. Please be sensitive to that. If you got helped by one of the developers in the past please don't tag them in future issues, unless they are listed in the issue template for the domain you are asking about or that developer gave you an explicit permission to tag them in future issues. If you see a certain developer doing multiple and/or recent commits into a specific area of the project that you feel is relevant to your issue, it is not a good reason to tag them. Various developers may be fixing things that prevent them from moving forward, but often their work is focused on a totally different domain. And while they may or may not know how to help you with the problem at hand, it would benefit the whole community much more if they focus on the domain of their unique expertise. 11. Use the Edit button. Take your time, and re-read and improve the wording and formatting to make your posts and comments as easy to understand as possible. Avoid posting multiple comments in a row, as each comment generates a notification for the developers tagged in that issue. If you happened to post multiple comments in a row, and nobody followed up yet - consider merging those into one or a few comments while editing the combined content to be coherent. If you choose to edit your older comments after others posted follow up comments you need to be aware that your modifications might not be noticed, so if it's not a typo fixing, try to write a new comment flagging that something has been changed in the previous comments. For example, the very first comment is the most important one. 
If while the thread unfolds you realize that things aren't as they seemed to you originally you may want to edit the first post to reflect the up-to-date understanding of the issue at hand so that it helps those who read your issue in the future quickly understand what's going on and not need to sift through dozens of comments. It also helps to indicate that the post was edited. So, those reading the thread later can understand why there might be certain discontinuity in the information flow.

    Use bullets and items if you have lists of items and the outcome improves overall readability.

    Use backticks to refer to class and function names, e.g. `BartModel` and `generate` as these stand out and improve the speed of a reader's comprehension.

    Try not to use italics and bold text too much as these often make the text more difficult to read.

12. If you are cross-referencing a specific comment in a given thread or another issue, always link to that specific comment, rather than using the issue link. If you do the latter it could be quite impossible to find which specific comment you're referring to.

    To get the link to the specific comment do not copy the url from the location bar of your browser, but instead, click the `...` icon in the upper right corner of the comment and then select "Copy Link".

    For example the first link is a link to an issue, and the second to a specific comment in the same issue:

    1. https://github.com/huggingface/transformers/issues/9257
    2. https://github.com/huggingface/transformers/issues/9257#issuecomment-749945162

13. If you are replying to the last comment, it's totally fine to make your reply with just your comment in it. The readers can follow the information flow here.

    But if you're replying to a comment that happened some comments back it's always a good practice to quote just the relevant lines you're replying to. The `>` is used for quoting, or you can always use the menu to do so.
For example your editor box will look like: ``` > How big is your GPU cluster? Our cluster is made of 256 GPUs. ``` If you are addressing multiple comments, quote the relevant parts of each before your answer. Some people use the same comment to do multiple replies, others separate them into separate comments. Either way works. The latter approach helps for linking to a specific comment. In general the best way to figure out what works the best is learn from issues posted by other people - see which issues get great responses and which get little to no response - observe what the posters who received great responses did differently from those who did not. Thank you for reading this somewhat lengthy document. We would like to conclude that these are not absolute rules, but a friendly advice that will help maximize the chances for us to understand what you are trying to communicate, reproduce the problem then resolve it to your satisfaction and the benefit of the whole community. If after reading this document there are remaining questions on how and why or there is a need for further elucidation, please, don't hesitate to ask your question in [this thread](https://discuss.huggingface.co/t/how-to-request-support/3128).
transformers/ISSUES.md/0
{ "file_path": "transformers/ISSUES.md", "repo_id": "transformers", "token_count": 4682 }
376
import argparse
import subprocess


def main(config_dir, config_name, args):
    """Launch `optimum-benchmark` for the given Hydra config.

    Args:
        config_dir: Path to the directory containing the Hydra config.
        config_name: Name of the config to run.
        args: Extra command-line arguments forwarded verbatim to
            `optimum-benchmark` (typically Hydra overrides).

    Hydra's job and framework logging are disabled so that only the
    benchmark's own output is emitted.
    """
    command = [
        "optimum-benchmark",
        "--config-dir",
        f"{config_dir}",
        "--config-name",
        f"{config_name}",
        "hydra/job_logging=disabled",
        "hydra/hydra_logging=disabled",
    ]
    subprocess.run(command + args)


if __name__ == "__main__":
    # Parse only the flags this wrapper knows about; everything else is
    # passed through unchanged to optimum-benchmark via `parse_known_args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
    parser.add_argument("--config-name", type=str, required=True, help="The config name.")
    args, unknown = parser.parse_known_args()
    main(args.config_dir, args.config_name, unknown)
transformers/benchmark/optimum_benchmark_wrapper.py/0
{ "file_path": "transformers/benchmark/optimum_benchmark_wrapper.py", "repo_id": "transformers", "token_count": 216 }
377
# CI image for transformers quantization tests: CUDA 12.1 base, PyTorch 2.6.0,
# transformers installed in editable mode, plus a large set of quantization
# backends (bitsandbytes, gptqmodel, awq, aqlm, hqq, quanto, eetq, ...).
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
SHELL ["sh", "-lc"]

# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
# to be used as arguments for docker build (so far).
ARG PYTORCH='2.6.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu121'
# Disable kernel mapping for quantization tests
ENV DISABLE_KERNEL_MAPPING=1

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

# If PYTORCH is non-empty, pin `torch==<PYTORCH>.*`; otherwise install the latest `torch`.
# The resolved VERSION is exported via ~/.profile so later RUN steps (login shell) can read it.
RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile
RUN echo torch=$VERSION
# `torchvision` and `torchaudio` should be installed along with `torch`, especially for nightly build.
# Currently, let's just use their latest releases (when `torch` is installed with a release version)
RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

# needed in bnb and awq
RUN python3 -m pip install --no-cache-dir einops

# Add bitsandbytes for mixed int8 testing
RUN python3 -m pip install --no-cache-dir bitsandbytes

# Add gptqmodel for gtpq quantization testing, installed from source for pytorch==2.6.0 compatibility
RUN python3 -m pip install lm_eval
RUN git clone https://github.com/ModelCloud/GPTQModel.git && cd GPTQModel && pip install -v . --no-build-isolation

# Add optimum for gptq quantization testing
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum

# Add PEFT
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/peft@main#egg=peft

# Add aqlm for quantization testing
RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2

# Add vptq for quantization testing
RUN pip install vptq

# Add spqr for quantization testing
# Commented for now as No matching distribution found we need to reach out to the authors
# RUN python3 -m pip install --no-cache-dir spqr_quant[gpu]

# Add hqq for quantization testing
RUN python3 -m pip install --no-cache-dir hqq

# For GGUF tests
RUN python3 -m pip install --no-cache-dir gguf

# Add autoawq for quantization testing
# New release v0.2.8
RUN python3 -m pip install --no-cache-dir autoawq[kernels]

# Add quanto for quantization testing
RUN python3 -m pip install --no-cache-dir optimum-quanto

# Add eetq for quantization testing
RUN git clone https://github.com/NetEase-FuXi/EETQ.git && cd EETQ/ && git submodule update --init --recursive && pip install .

# # Add flute-kernel and fast_hadamard_transform for quantization testing
# # Commented for now as they cause issues with the build
# # TODO: create a new workflow to test them
# RUN python3 -m pip install --no-cache-dir flute-kernel==0.4.1
# RUN python3 -m pip install --no-cache-dir git+https://github.com/Dao-AILab/fast-hadamard-transform.git

# Add fp-quant for quantization testing
# Requires py3.11 but our CI runs on 3.9
# RUN python3 -m pip install --no-cache-dir "fp-quant>=0.1.6"

# Add compressed-tensors for quantization testing
RUN python3 -m pip install --no-cache-dir compressed-tensors

# Add AMD Quark for quantization testing
RUN python3 -m pip install --no-cache-dir amd-quark

# Add AutoRound for quantization testing
RUN python3 -m pip install --no-cache-dir "auto-round>=0.5.0"

# Add transformers in editable mode
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch]

# `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs
RUN python3 -m pip uninstall -y kernels

# Uninstall flash-attn installed by autoawq, it causes issues here : https://github.com/huggingface/transformers/actions/runs/15915442841/job/44892146131
RUN python3 -m pip uninstall -y flash-attn

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
transformers/docker/transformers-quantization-latest-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-quantization-latest-gpu/Dockerfile", "repo_id": "transformers", "token_count": 1463 }
378
# ุงุณุชุฎุฏุงู… ู…ุฌุฒุฆูŠุงุช ุงู„ู†ุตูˆุต ู…ู† ๐Ÿค— Tokenizers ูŠุนุชู…ุฏ [`PreTrainedTokenizerFast`] ุนู„ู‰ ู…ูƒุชุจุฉ [๐Ÿค— Tokenizers](https://huggingface.co/docs/tokenizers). ูŠู…ูƒู† ุชุญู…ูŠู„ ุงู„ู…ุฌุฒุฆุงุช ุงู„ู„ุบูˆูŠูŠู† ุงู„ุฐูŠู† ุชู… ุงู„ุญุตูˆู„ ุนู„ูŠู‡ู… ู…ู† ู…ูƒุชุจุฉ ๐Ÿค— Tokenizers ุจุจุณุงุทุฉ ุดุฏูŠุฏุฉ ููŠ ๐Ÿค— Transformers. ู‚ุจู„ ุงู„ุฏุฎูˆู„ ููŠ ุงู„ุชูุงุตูŠู„ุŒ ุฏุนูˆู†ุง ู†ุจุฏุฃ ุฃูˆู„ุงู‹ ุจุฅู†ุดุงุก ู…ูุฌุฒู‰ุก ู„ุบูˆูŠ ุชุฌุฑูŠุจูŠ ููŠ ุจุถุน ุณุทูˆุฑ: ```python >>> from tokenizers import Tokenizer >>> from tokenizers.models import BPE >>> from tokenizers.trainers import BpeTrainer >>> from tokenizers.pre_tokenizers import Whitespace >>> tokenizer = Tokenizer(BPE(unk_token="[UNK]")) >>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) >>> tokenizer.pre_tokenizer = Whitespace() >>> files = [...] >>> tokenizer.train(files, trainer) ``` ุงู„ุขู† ู„ุฏูŠู†ุง ู…ูุฌุฒู‰ุก ู„ุบูˆูŠ ู…ุฏุฑุจ ุนู„ู‰ ุงู„ู…ู„ูุงุช ุงู„ุชูŠ ุญุฏุฏู†ุงู‡ุง. ูŠู…ูƒู†ู†ุง ุฅู…ุง ุงู„ุงุณุชู…ุฑุงุฑ ููŠ ุงุณุชุฎุฏุงู…ู‡ ููŠ ูˆู‚ุช ุงู„ุชุดุบูŠู„ ู‡ุฐุงุŒ ุฃูˆ ุญูุธู‡ ููŠ ู…ู„ู JSON ู„ุฅุนุงุฏุฉ ุงุณุชุฎุฏุงู…ู‡ ู„ุงุญู‚ู‹ุง. ## ุชุญู…ูŠู„ ู…ูุฌุฒุฆ ุงู„ู†ู‘ุตูˆุต ู…ูุจุงุดุฑุฉู‹ ุฏุนูˆู†ุง ู†ุฑู‰ ูƒูŠู ูŠู…ูƒู†ู†ุง ุงู„ุงุณุชูุงุฏุฉ ู…ู† ูƒุงุฆู† (ู…ูุฌุฒุฆ ุงู„ู†ุตูˆุต) ููŠ ู…ูƒุชุจุฉ ๐Ÿค— Transformers. ุชุณู…ุญ ูุฆุฉ [`PreTrainedTokenizerFast`] ุณู‡ูˆู„ุฉ ุฅู†ุดุงุก *tokenizer*ุŒ ู…ู† ุฎู„ุงู„ ู‚ุจูˆู„ ูƒุงุฆู† *ุงู„ู…ูุฌุฒุฆ ุงู„ู†ุตูˆุต* ู…ูู‡ูŠู‘ุฃ ู…ูุณุจู‚ู‹ุง ูƒู…ุนุงู…ู„: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) ``` ูŠู…ูƒู† ุงู„ุขู† ุงุณุชุฎุฏุงู… ู‡ุฐุง ุงู„ูƒุงุฆู† ู…ุน ุฌู…ูŠุน ุงู„ุทุฑู‚ ุงู„ู…ูุดุชุฑูƒุฉ ุจูŠู† ู…ูุฌุฒู‘ุฆูŠ ุงู„ู†ู‘ุตูˆุต ู„ู€ ๐Ÿค— Transformers! ุงู†ุชู‚ู„ ุฅู„ู‰ [ุตูุญุฉ ู…ูุฌุฒู‘ุฆ ุงู„ู†ู‘ุตูˆุต](main_classes/tokenizer) ู„ู…ุฒูŠุฏ ู…ู† ุงู„ู…ุนู„ูˆู…ุงุช. 
## ุงู„ุชุญู…ูŠู„ ู…ู† ู…ู„ู JSON ู„ุชุญู…ูŠู„ ู…ูุฌุฒู‘ุฆ ุงู„ู†ุต ู…ู† ู…ู„ู JSONุŒ ุฏุนูˆู†ุง ู†ุจุฏุฃ ุฃูˆู„ุงู‹ ุจุญูุธ ู…ูุฌุฒู‘ุฆ ุงู„ู†ู‘ุตูˆุต: ```python >>> tokenizer.save("tokenizer.json") ``` ูŠู…ูƒู† ุชู…ุฑูŠุฑ ุงู„ู…ุณุงุฑ ุงู„ุฐูŠ ุญูุธู†ุง ุจู‡ ู‡ุฐุง ุงู„ู…ู„ู ุฅู„ู‰ ุทุฑูŠู‚ุฉ ุชู‡ูŠุฆุฉ [`PreTrainedTokenizerFast`] ุจุงุณุชุฎุฏุงู… ุงู„ู…ูุนุงู…ู„ `tokenizer_file`: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` ูŠู…ูƒู† ุงู„ุขู† ุงุณุชุฎุฏุงู… ู‡ุฐุง ุงู„ูƒุงุฆู† ู…ุน ุฌู…ูŠุน ุงู„ุทุฑู‚ ุงู„ุชูŠ ุชุดุชุฑูƒ ููŠู‡ุง ู…ูุฌุฒู‘ุฆูŠ ุงู„ู†ู‘ุตูˆุต ู„ู€ ๐Ÿค— Transformers! ุงู†ุชู‚ู„ ุฅู„ู‰ [ุตูุญุฉ ู…ูุฌุฒู‘ุฆ ุงู„ู†ุต](main_classes/tokenizer) ู„ู…ุฒูŠุฏ ู…ู† ุงู„ู…ุนู„ูˆู…ุงุช.
transformers/docs/source/ar/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/ar/fast_tokenizers.md", "repo_id": "transformers", "token_count": 1394 }
379
# ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ู„ู„ู†ู…ุงุฐุฌ ุฐุงุช ุงู„ุทูˆู„ ุงู„ุซุงุจุช [[open-in-colab]] ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ (PPL) ู‡ูŠ ูˆุงุญุฏุฉ ู…ู† ุฃูƒุซุฑ ุงู„ู…ู‚ุงูŠูŠุณ ุดูŠูˆุนู‹ุง ู„ุชู‚ูŠูŠู… ู†ู…ุงุฐุฌ ุงู„ู„ุบุฉ. ู‚ุจู„ ุงู„ุฎูˆุถ ููŠ ุงู„ุชูุงุตูŠู„ุŒ ูŠุฌุจ ุฃู† ู†ู„ุงุญุธ ุฃู† ุงู„ู…ู‚ูŠุงุณ ูŠู†ุทุจู‚ ุชุญุฏูŠุฏู‹ุง ุนู„ู‰ ู†ู…ุงุฐุฌ ุงู„ู„ุบุฉ ุงู„ูƒู„ุงุณูŠูƒูŠุฉ (ูŠูุทู„ู‚ ุนู„ูŠู‡ุง ุฃุญูŠุงู†ู‹ุง ู†ู…ุงุฐุฌ ุงู„ู„ุบุฉ ุงู„ุชู„ู‚ุงุฆูŠุฉ ุงู„ู…ุฑุฌุนูŠุฉ ุฃูˆ ุงู„ุณุจุจูŠุฉ) ูˆู‡ูŠ ุบูŠุฑ ู…ุญุฏุฏุฉ ุฌูŠุฏู‹ุง ู„ู†ู…ุงุฐุฌ ุงู„ู„ุบุฉ ุงู„ู…ู‚ู†ุนุฉ ู…ุซู„ BERT (ุฑุงุฌุน [ู…ู„ุฎุต ุงู„ู†ู…ุงุฐุฌ](model_summary)). ุชูุนุฑูŽู‘ู ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ุนู„ู‰ ุฃู†ู‡ุง ุงู„ุฃุณ ุงู„ู…ูุฑููˆุน ู„ู‚ูŠู…ุฉ ู…ุชูˆุณุท ุงู„ู„ูˆุบุงุฑูŠุชู… ุงู„ุงุญุชู…ุงู„ูŠ ู„ู…ุชุชุงู„ูŠุฉ. ุฅุฐุง ูƒุงู† ู„ุฏูŠู†ุง ุชุณู„ุณู„ ุฑู…ุฒูŠ \\(X = (x_0, x_1, \dots, x_t)\\)ุŒ ูุฅู† ุญูŠุฑุฉ \\(X\\) ู‡ูŠุŒ $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ ุญูŠุซ \\(\log p_\theta (x_i|x_{<i})\\) ู‡ูˆ ุงู„ู„ูˆุบุงุฑูŠุชู… ุงู„ุงุญุชู…ุงู„ูŠ ู„ู„ุฑู…ุฒ i ุจุดุฑุท ุงู„ุฑู…ูˆุฒ ุงู„ุณุงุจู‚ุฉ \\(x_{<i}\\) ูˆูู‚ู‹ุง ู„ู†ู…ูˆุฐุฌู†ุง. ูˆู…ู† ุงู„ู†ุงุญูŠุฉ ุงู„ุจุฏูŠู‡ูŠุฉุŒ ูŠู…ูƒู† ุงุนุชุจุงุฑู‡ุง ุชู‚ูŠูŠู…ู‹ุง ู„ู‚ุฏุฑุฉ ุงู„ู†ู…ูˆุฐุฌ ุนู„ู‰ ุงู„ุชู†ุจุค ุจุงู„ุชุณุงูˆูŠ ุจูŠู† ู…ุฌู…ูˆุนุฉ ู…ู† ุงู„ุฑู…ูˆุฒ ุงู„ู…ุญุฏุฏุฉ ููŠ ู…ุฌู…ูˆุนุฉ ู…ู† ุงู„ุจูŠุงู†ุงุช. ูˆู…ู† ุงู„ู…ู‡ู… ุงู„ุฅุดุงุฑุฉ ุฅู„ู‰ ุฃู† ุนู…ู„ูŠุฉ ุงู„ุชู…ูŠูŠุฒ ู„ู‡ ุชุฃุซูŠุฑ ู…ุจุงุดุฑู‹ุง ุนู„ู‰ ุญูŠุฑุฉ ุงู„ู†ู…ูˆุฐุฌุŒูˆูŠุฌุจ ู…ุฑุงุนุงุชู‡ุง ุฏุงุฆู…ู‹ุง ุนู†ุฏ ู…ู‚ุงุฑู†ุฉ ุงู„ู†ู…ุงุฐุฌ ุงู„ู…ุฎุชู„ูุฉ. ูƒู…ุง ุฃู†ู‡ุง ุชุนุงุฏู„ ุงู„ุฃุณ ุงู„ู…ูุฑููˆุน ู„ู‚ูŠู…ุฉ ุงู„ุงู†ุชุฑูˆุจูŠุง ุงู„ู…ุชู‚ุงุทุนุฉ ุจูŠู† ุงู„ุจูŠุงู†ุงุช ูˆุชู†ุจุคุงุช ุงู„ู†ู…ูˆุฐุฌ. 
ู„ู…ุฒูŠุฏ ู…ู† ุงู„ูู‡ู… ุญูˆู„ ู…ูู‡ูˆู… ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ูˆุนู„ุงู‚ุชู‡ุง ุจู€ Bits Per Character (BPC) ูˆุถุบุท ุงู„ุจูŠุงู†ุงุชุŒ ูŠูุฑุฌู‰ ู…ุฑุงุฌุนุฉ [ุงู„ุชุฏูˆูŠู†ุฉ ุงู„ู…ููŠุฏุฉ ุนู„ู‰ The Gradient](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/). ## ุญุณุงุจ PPL ู…ุน ุงู„ู†ู…ุงุฐุฌ ุฐุงุช ุงู„ุทูˆู„ ุงู„ุซุงุจุช ุฅุฐุง ู„ู… ู†ูƒู† ู…ู‚ูŠุฏูŠู† ุจุญุฌู… ุณูŠุงู‚ ุงู„ู†ู…ูˆุฐุฌุŒ ูุณู†ู‚ูˆู… ุจุชู‚ูŠูŠู… ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ู„ู„ู†ู…ูˆุฐุฌ ุนู† ุทุฑูŠู‚ ุชุญู„ูŠู„ ุงู„ุชุณู„ุณู„ ุชู„ู‚ุงุฆูŠู‹ุง ูˆุงู„ุดุฑุท ุนู„ู‰ ุงู„ุชุณู„ุณู„ ุงู„ูุฑุนูŠ ุงู„ุณุงุจู‚ ุจุงู„ูƒุงู…ู„ ููŠ ูƒู„ ุฎุทูˆุฉุŒ ูƒู…ุง ู‡ูˆ ู…ูˆุถุญ ุฃุฏู†ุงู‡. <img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> ู„ูƒู† ุนู†ุฏ ุงู„ุชุนุงู…ู„ ู…ุน ุงู„ู†ู…ุงุฐุฌ ุงู„ุชู‚ุฑูŠุจูŠุฉุŒ ู†ูˆุงุฌู‡ ุนุงุฏุฉู‹ ู‚ูŠุฏู‹ุง ุนู„ู‰ ุนุฏุฏ ุงู„ุฑู…ูˆุฒ ุงู„ุชูŠ ูŠู…ูƒู† ู„ู„ู†ู…ูˆุฐุฌ ู…ุนุงู„ุฌุชู‡ุง. ุนู„ู‰ ุณุจูŠู„ ุงู„ู…ุซุงู„ุŒ ุชุญุชูˆูŠ ุฃูƒุจุฑ ู†ุณุฎุฉ ู…ู† [GPT-2](model_doc/gpt2) ุนู„ู‰ ุทูˆู„ ุซุงุจุช ูŠุจู„ุบ 1024 ุฑู…ุฒู‹ุงุŒ ู„ุฐุง ู„ุง ูŠู…ูƒู†ู†ุง ุญุณุงุจ \\(p_\theta(x_t|x_{<t})\\) ู…ุจุงุดุฑุฉ ุนู†ุฏู…ุง ุชูƒูˆู† \\(t\\) ุฃูƒุจุฑ ู…ู† 1024. ุจุฏู„ุงู‹ ู…ู† ุฐู„ูƒุŒ ูŠุชู… ุนุงุฏุฉู‹ ุชู‚ุณูŠู… ุงู„ุชุณู„ุณู„ ุฅู„ู‰ ุชุณู„ุณู„ุงุช ูุฑุนูŠุฉ ู…ุณุงูˆูŠุฉ ู„ุญุฌู… ุงู„ุฅุฏุฎุงู„ ุงู„ุฃู‚ุตู‰ ู„ู„ู†ู…ูˆุฐุฌ. ูุฅุฐุง ูƒุงู† ุญุฌู… ุงู„ุฅุฏุฎุงู„ ุงู„ุฃู‚ุตู‰ ู„ู„ู†ู…ูˆุฐุฌ ู‡ูˆ \\(k\\)ุŒ ูุฅู†ู†ุง ู†ู‚ุฑุจ ุงุญุชู…ุงู„ ุงู„ุฑู…ุฒ \\(x_t\\) ุนู† ุทุฑูŠู‚ ุงู„ุงุดุชู‚ุงู‚ ุงู„ุดุฑุทูŠ ูู‚ุท ุจุงู„ู†ุณุจุฉ ุฅู„ู‰ \\(k-1\\) ู…ู† ุงู„ุฑู…ูˆุฒ ุงู„ุชูŠ ุชุณุจู‚ู‡ ุจุฏู„ุงู‹ ู…ู† ุงู„ุณูŠุงู‚ ุจุฃูƒู…ู„ู‡. 
ูˆุนู†ุฏ ุชู‚ูŠูŠู… ุญูŠุฑุฉ ุงู„ู†ู…ูˆุฐุฌ ู„ุชุณู„ุณู„ ู…ุงุŒ ู‚ุฏ ูŠุจุฏูˆ ู…ู† ุงู„ู…ุบุฑูŠ ุชู‚ุณูŠู… ุงู„ุชุณู„ุณู„ ุฅู„ู‰ ุฃุฌุฒุงุก ู…ู†ูุตู„ุฉ ูˆุฌู…ุน ู…ุฌู…ูˆุน ุฏูˆุงู„ ุงู„ู„ูˆุบุงุฑูŠุชู… ู„ูƒู„ ุฌุฒุก ุจุดูƒู„ ู…ุณุชู‚ู„ุŒ ู„ูƒู† ู‡ุฐุง ุงู„ุฃุณู„ูˆุจ ู„ูŠุณ ุงู„ุฃู…ุซู„. <img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> ุชุชู…ูŠุฒ ู‡ุฐู‡ ุงู„ุทุฑูŠู‚ุฉ ุจุณุฑุนุฉ ุญุณุงุจู‡ุง ู†ุธุฑู‹ุง ู„ุฅู…ูƒุงู†ูŠุฉ ุญุณุงุจ ุฏุฑุฌุฉ ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ู„ูƒู„ ุฌุฒุก ุจู…ุณุญ ูˆุงุญุฏ ู„ู„ุฃู…ุงู…ุŒ ุฅู„ุง ุฃู†ู‡ุง ุชูุนุฏู‘ ุชู‚ุฑูŠุจู‹ุง ุถุนูŠูู‹ุง ู„ุฏุฑุฌุฉ ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ุงู„ู…ูุญู„ู‘ู„ุฉ ุจุดูƒู„ ูƒุงู…ู„ุŒ ูˆุนุงุฏุฉู‹ ู…ุง ุชุคุฏูŠ ุฅู„ู‰ ุฏุฑุฌุฉ ุชุนู‚ูŠุฏ ู„ุบูˆูŠ ุฃุนู„ู‰ (ุฃุณูˆุฃ) ู„ุฃู† ุงู„ู†ู…ูˆุฐุฌ ุณูŠูƒูˆู† ู„ุฏูŠู‡ ุณูŠุงู‚ ุฃู‚ู„ ููŠ ู…ุนุธู… ุฎุทูˆุงุช ุงู„ุชู†ุจุค. ุจุฏู„ุงู‹ ู…ู† ุฐู„ูƒุŒ ูŠุฌุจ ุชู‚ูŠูŠู… ุฏุฑุฌุฉ ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ู„ู„ู†ู…ุงุฐุฌ ุฐุงุช ุงู„ุทูˆู„ ุงู„ุซุงุจุช ุจุงุณุชุฎุฏุงู… ุฅุณุชุฑุงุชูŠุฌูŠุฉ ุงู„ู†ุงูุฐุฉ ุงู„ู…ู†ุฒู„ู‚ุฉ. ูˆูŠู†ุทูˆูŠ ู‡ุฐุง ุนู„ู‰ ุชุญุฑูŠูƒ ู†ุงูุฐุฉ ุงู„ุณูŠุงู‚ ุจุดูƒู„ ู…ุชูƒุฑุฑ ุจุญูŠุซ ูŠูƒูˆู† ู„ู„ู†ู…ูˆุฐุฌ ุณูŠุงู‚ ุฃูƒุจุฑ ุนู†ุฏ ุฅุฌุฑุงุก ูƒู„ ุชู†ุจุค. <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> ู‡ุฐุง ุชู‚ุฑูŠุจ ุฃู‚ุฑุจ ู„ู„ุชููƒูŠูƒ ุงู„ุญู‚ูŠู‚ูŠ ู„ุงุญุชู…ุงู„ูŠุฉ ุงู„ุชุณู„ุณู„ ูˆุณูŠุคุฏูŠ ุนุงุฏุฉู‹ ุฅู„ู‰ ู†ุชูŠุฌุฉ ุฃูุถู„.ู„ูƒู† ุงู„ุฌุงู†ุจ ุงู„ุณู„ุจูŠ ู‡ูˆ ุฃู†ู‡ ูŠุชุทู„ุจ ุชู…ุฑูŠุฑู‹ุง ู„ู„ุฃู…ุงู… ู„ูƒู„ ุฑู…ุฒ ููŠ ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุช. 
ุญู„ ูˆุณุท ุนู…ู„ูŠ ู…ู†ุงุณุจ ู‡ูˆ ุงุณุชุฎุฏุงู… ู†ุงูุฐุฉ ู…ู†ุฒู„ู‚ุฉ ุจุฎุทูˆุฉุŒ ุจุญูŠุซ ูŠุชู… ุชุญุฑูŠูƒ ุงู„ุณูŠุงู‚ ุจุฎุทูˆุงุช ุฃูƒุจุฑ ุจุฏู„ุงู‹ ู…ู† ุงู„ุงู†ุฒู„ุงู‚ ุจู…ู‚ุฏุงุฑ 1 ุฑู…ุฒ ููŠ ูƒู„ ู…ุฑุฉ. ู…ู…ุง ูŠุณู…ุญ ุจุฅุฌุฑุงุก ุงู„ุญุณุงุจ ุจุดูƒู„ ุฃุณุฑุน ู…ุน ุฅุนุทุงุก ุงู„ู†ู…ูˆุฐุฌ ุณูŠุงู‚ู‹ุง ูƒุจูŠุฑู‹ุง ู„ู„ุชู†ุจุคุงุช ููŠ ูƒู„ ุฎุทูˆุฉ. ## ู…ุซุงู„: ุญุณุงุจ ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ู…ุน GPT-2 ููŠ ๐Ÿค— Transformers ุฏุนูˆู†ุง ู†ูˆุถุญ ู‡ุฐู‡ ุงู„ุนู…ู„ูŠุฉ ู…ุน GPT-2. ```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` ุณู†ู‚ูˆู… ุจุชุญู…ูŠู„ ู…ุฌู…ูˆุนุฉ ุจูŠุงู†ุงุช WikiText-2 ูˆุชู‚ูŠูŠู… ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ุจุงุณุชุฎุฏุงู… ุจุนุถ ุฅุณุชุฑุงุชูŠุฌูŠุงุช ู…ุฎุชู„ูุฉ ุงู„ู†ุงูุฐุฉ ุงู„ู…ู†ุฒู„ู‚ุฉ. ู†ุธุฑู‹ุง ู„ุฃู† ู‡ุฐู‡ ุงู„ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุช ุงู„ุตุบูŠุฑุฉ ูˆู†ู‚ูˆู… ูู‚ุท ุจู…ุณุญ ูˆุงุญุฏ ูู‚ุท ู„ู„ู…ุฌู…ูˆุนุฉุŒ ููŠู…ูƒู†ู†ุง ุจุจุณุงุทุฉ ุชุญู…ูŠู„ ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุช ูˆุชุฑู…ูŠุฒู‡ุง ุจุงู„ูƒุงู…ู„ ููŠ ุงู„ุฐุงูƒุฑุฉ. ```python from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt") ``` ู…ุน ๐Ÿค— TransformersุŒ ูŠู…ูƒู†ู†ุง ุจุจุณุงุทุฉ ุชู…ุฑูŠุฑ `input_ids` ูƒู€ `labels` ุฅู„ู‰ ู†ู…ูˆุฐุฌู†ุงุŒ ูˆุณูŠุชู… ุฅุฑุฌุงุน ู…ุชูˆุณุท ุงุญุชู…ุงู„ูŠุฉ ุงู„ุณุฌู„ ุงู„ุณุงู„ุจ ู„ูƒู„ ุฑู…ุฒ ูƒุฎุณุงุฑุฉ. ูˆู…ุน ุฐู„ูƒุŒ ู…ุน ู†ู‡ุฌ ุงู„ู†ุงูุฐุฉ ุงู„ู…ู†ุฒู„ู‚ุฉุŒ ู‡ู†ุงูƒ ุชุฏุงุฎู„ ููŠ ุงู„ุฑู…ูˆุฒ ุงู„ุชูŠ ู†ู…ุฑุฑู‡ุง ุฅู„ู‰ ุงู„ู†ู…ูˆุฐุฌ ููŠ ูƒู„ ุชูƒุฑุงุฑ. 
ู„ุง ู†ุฑูŠุฏ ุชุถู…ูŠู† ุงุญุชู…ุงู„ูŠุฉ ุงู„ุณุฌู„ ู„ู„ุฑู…ูˆุฒ ุงู„ุชูŠ ู†ุชุนุงู…ู„ ู…ุนู‡ุง ูƒุณูŠุงู‚ ูู‚ุท ููŠ ุฎุณุงุฑุชู†ุงุŒ ู„ุฐุง ูŠู…ูƒู†ู†ุง ุชุนูŠูŠู† ู‡ุฐู‡ ุงู„ุฃู‡ุฏุงู ุฅู„ู‰ `-100` ุจุญูŠุซ ูŠุชู… ุชุฌุงู‡ู„ู‡ุง. ููŠู…ุง ูŠู„ูŠ ู‡ูˆ ู…ุซุงู„ ุนู„ู‰ ูƒูŠููŠุฉ ุงู„ู‚ูŠุงู… ุจุฐู„ูƒ ุจุฎุทูˆุฉ ุชุจู„ุบ `512`. ูˆู‡ุฐุง ูŠุนู†ูŠ ุฃู† ุงู„ู†ู…ูˆุฐุฌ ุณูŠูƒูˆู† ู„ุฏูŠู‡ 512 ุฑู…ุฒู‹ุง ุนู„ู‰ ุงู„ุฃู‚ู„ ู„ู„ุณูŠุงู‚ ุนู†ุฏ ุญุณุงุจ ุงู„ุงุญุชู…ุงู„ูŠุฉ ุงู„ุดุฑุทูŠุฉ ู„ุฃูŠ ุฑู…ุฒ ูˆุงุญุฏ (ุจุดุฑุท ุชูˆูุฑ 512 ุฑู…ุฒู‹ุง ุณุงุจู‚ู‹ุง ู…ุชุงุญู‹ุง ู„ู„ุงุดุชู‚ุงู‚). ```python import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 seq_len = encodings.input_ids.size(1) nlls = [] prev_end_loc = 0 for begin_loc in tqdm(range(0, seq_len, stride)): end_loc = min(begin_loc + max_length, seq_len) trg_len = end_loc - prev_end_loc # ู‚ุฏ ุชูƒูˆู† ู…ุฎุชู„ูุฉ ุนู† ุงู„ุฎุทูˆุฉ ููŠ ุงู„ุญู„ู‚ุฉ ุงู„ุฃุฎูŠุฑุฉ input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) # ูŠุชู… ุญุณุงุจ ุงู„ุฎุณุงุฑุฉ ุจุงุณุชุฎุฏุงู… CrossEntropyLoss ุงู„ุฐูŠ ูŠู‚ูˆู… ุจุงู„ู…ุชูˆุณุท ุนู„ู‰ ุงู„ุชุตู†ูŠูุงุช ุงู„ุตุญูŠุญุฉ # ู„ุงุญุธ ุฃู† ุงู„ู†ู…ูˆุฐุฌ ูŠุญุณุจ ุงู„ุฎุณุงุฑุฉ ุนู„ู‰ trg_len - 1 ู…ู† ุงู„ุชุตู†ูŠูุงุช ูู‚ุทุŒ ู„ุฃู†ู‡ ูŠุชุญูˆู„ ุฏุงุฎู„ูŠู‹ุง ุฅู„ู‰ ุงู„ูŠุณุงุฑ ุจูˆุงุณุทุฉ 1. neg_log_likelihood = outputs.loss nlls.append(neg_log_likelihood) prev_end_loc = end_loc if end_loc == seq_len: break ppl = torch.exp(torch.stack(nlls).mean()) ``` ูŠุนุฏ ุชุดุบูŠู„ ู‡ุฐุง ู…ุน ุทูˆู„ ุงู„ุฎุทูˆุฉ ู…ุณุงูˆูŠู‹ุง ู„ุทูˆู„ ุงู„ุฅุฏุฎุงู„ ุงู„ุฃู‚ุตู‰ ูŠุนุงุฏู„ ู„ุงุณุชุฑุงุชูŠุฌูŠุฉ ุงู„ู†ุงูุฐุฉ ุบูŠุฑ ุงู„ู…ู†ุฒู„ู‚ุฉ ูˆุบูŠุฑ ุงู„ู…ุซู„ู‰ ุงู„ุชูŠ ู†ุงู‚ุดู†ุงู‡ุง ุฃุนู„ุงู‡. 
ูˆูƒู„ู…ุง ุตุบุฑุช ุงู„ุฎุทูˆุฉุŒ ุฒุงุฏ ุงู„ุณูŠุงู‚ ุงู„ุฐูŠ ุณูŠุญุตู„ ุนู„ูŠู‡ ุงู„ู†ู…ูˆุฐุฌ ููŠ ุนู…ู„ ูƒู„ ุชู†ุจุคุŒ ูˆูƒู„ู…ุง ูƒุงู†ุช ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ุงู„ู…ูุจู„ุบ ุนู†ู‡ุง ุฃูุถู„ ุนุงุฏุฉู‹. ุนู†ุฏู…ุง ู†ู‚ูˆู… ุจุชุดุบูŠู„ ู…ุง ุณุจู‚ ุจุงุณุชุฎุฏุงู… `stride = 1024`ุŒ ุฃูŠ ุจุฏูˆู† ุชุฏุงุฎู„ุŒ ุชูƒูˆู† ุฏุฑุฌุฉ ุงู„ุชุนู‚ูŠุฏ ุงู„ู„ุบูˆูŠ ุงู„ู†ุงุชุฌุฉ ู‡ูŠ `19.44`ุŒ ูˆู‡ูˆ ู…ุง ูŠู…ุงุซู„ `19.93` ุงู„ู…ุจู„ุบ ุนู†ู‡ุง ููŠ ูˆุฑู‚ุฉ GPT-2. ู…ู† ุฎู„ุงู„ ุงุณุชุฎุฏุงู… `stride = 512` ูˆุจุงู„ุชุงู„ูŠ ุงุณุชุฎุฏุงู… ุฅุณุชุฑุงุชูŠุฌูŠุฉ ุงู„ู†ุงูุฐุฉ ุงู„ู…ู†ุฒู„ู‚ุฉุŒ ูŠู†ุฎูุถ ู‡ุฐุง ุฅู„ู‰ `16.45`. ู‡ุฐู‡ ุงู„ู†ุชูŠุฌุฉ ู„ูŠุณุช ูู‚ุท ุฃูุถู„ุŒ ูˆู„ูƒู†ู‡ุง ู…ุญุณูˆุจุฉ ุจุทุฑูŠู‚ุฉ ุฃู‚ุฑุจ ุฅู„ู‰ ุงู„ุชุญู„ูŠู„ ุงู„ุชู„ู‚ุงุฆูŠ ุงู„ุญู‚ูŠู‚ูŠ ู„ุงุญุชู…ุงู„ูŠุฉ ุงู„ุชุณู„ุณู„.
transformers/docs/source/ar/perplexity.md/0
{ "file_path": "transformers/docs/source/ar/perplexity.md", "repo_id": "transformers", "token_count": 5357 }
380
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ุชุตู†ูŠู ุงู„ุฑู…ูˆุฒ(Token classification) [[open-in-colab]] <Youtube id="wVHdVlPScxA"/> ูŠู‡ุฏู ุชุตู†ูŠู ุงู„ุฑู…ูˆุฒ ุฅู„ู‰ ุฅุนุทุงุก ุชุณู…ูŠุฉ ู„ูƒู„ ุฑู…ุฒ ุนู„ู‰ ุญุฏุฉ ููŠ ุงู„ุฌู…ู„ุฉ. ู…ู† ุฃูƒุซุฑ ู…ู‡ุงู… ุชุตู†ูŠู ุงู„ุฑู…ูˆุฒ ุดูŠูˆุนู‹ุง ู‡ูˆ ุงู„ุชุนุฑู ุนู„ู‰ ุงู„ูƒูŠุงู†ุงุช ุงู„ู…ุณู…ุงุฉ (NER). ูŠุญุงูˆู„ NER ุชุญุฏูŠุฏ ุชุณู…ูŠุฉ ู„ูƒู„ ูƒูŠุงู† ููŠ ุงู„ุฌู…ู„ุฉุŒ ู…ุซู„ ุดุฎุตุŒ ุฃูˆ ู…ูƒุงู†ุŒ ุฃูˆ ู…ู†ุธู…ุฉ. ุณูŠูˆุถุญ ู„ูƒ ู‡ุฐุง ุงู„ุฏู„ูŠู„ ูƒูŠููŠุฉ: 1. ุถุจุท [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) ุนู„ู‰ ู…ุฌู…ูˆุนุฉ ุจูŠุงู†ุงุช [WNUT 17](https://huggingface.co/datasets/wnut_17) ู„ู„ูƒุดู ุนู† ูƒูŠุงู†ุงุช ุฌุฏูŠุฏุฉ. 2. ุงุณุชุฎุฏุงู… ู†ู…ูˆุฐุฌูƒ ุงู„ู…ุถุจูˆุท ุจุฏู‚ุฉ ู„ู„ุงุณุชุฏู„ุงู„. <Tip> ู„ู„ุงุทู„ุงุน ุฌู…ูŠุน ุงู„ุจู†ู‰ ูˆุงู„ู†ู‚ุงุท ุงู„ู…ุชูˆุงูู‚ุฉ ู…ุน ู‡ุฐู‡ ุงู„ู…ู‡ู…ุฉุŒ ู†ูˆุตูŠ ุจุงู„ุฑุฌูˆุน ู…ู† [ุตูุญุฉ ุงู„ู…ู‡ู…ุฉ](https://huggingface.co/tasks/token-classification). 
</Tip> ู‚ุจู„ ุฃู† ุชุจุฏุฃุŒ ุชุฃูƒุฏ ู…ู† ุชุซุจูŠุช ุฌู…ูŠุน ุงู„ู…ูƒุชุจุงุช ุงู„ุถุฑูˆุฑูŠุฉ: ```bash pip install transformers datasets evaluate seqeval ``` ู†ุญู† ู†ุดุฌุนูƒ ุนู„ู‰ ุชุณุฌูŠู„ ุงู„ุฏุฎูˆู„ ุฅู„ู‰ ุญุณุงุจ HuggingFace ุงู„ุฎุงุต ุจูƒ ุญุชู‰ ุชุชู…ูƒู† ู…ู† ุชุญู…ูŠู„ ูˆู…ุดุงุฑูƒุฉ ู†ู…ูˆุฐุฌูƒ ู…ุน ุงู„ู…ุฌุชู…ุน. ุนู†ุฏู…ุง ูŠูุทู„ุจ ู…ู†ูƒุŒ ุฃุฏุฎู„ ุฑู…ุฒูƒ ู„ุชุณุฌูŠู„ ุงู„ุฏุฎูˆู„: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## ุชุญู…ูŠู„ ู…ุฌู…ูˆุนุฉ ุจูŠุงู†ุงุช WNUT 17 ุงุจุฏุฃ ุจุชุญู…ูŠู„ ู…ุฌู…ูˆุนุฉ ุจูŠุงู†ุงุช WNUT 17 ู…ู† ู…ูƒุชุจุฉ ๐Ÿค— Datasets: ```py >>> from datasets import load_dataset >>> wnut = load_dataset("wnut_17") ``` ุซู… ุฃู„ู‚ ู†ุธุฑุฉ ุนู„ู‰ ู…ุซุงู„: ```py >>> wnut["train"][0] {'id': '0', 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0], 'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.'] } ``` ูŠู…ุซู„ ูƒู„ ุฑู‚ู… ููŠ `ner_tags` ูƒูŠุงู†ุงู‹. ุญูˆู‘ู„ ุงู„ุฃุฑู‚ุงู… ุฅู„ู‰ ุฃุณู…ุงุก ุงู„ุชุตู†ูŠูุงุช ู„ู…ุนุฑูุฉ ู…ุงู‡ูŠุฉ ุงู„ูƒูŠุงู†ุงุช: ```py >>> label_list = wnut["train"].features[f"ner_tags"].feature.names >>> label_list [ "O", "B-corporation", "I-corporation", "B-creative-work", "I-creative-work", "B-group", "I-group", "B-location", "I-location", "B-person", "I-person", "B-product", "I-product", ] ``` ูŠุดูŠุฑ ุงู„ุญุฑู ุงู„ุฐูŠ ูŠุณุจู‚ ูƒู„ `ner_tag` ุฅู„ู‰ ู…ูˆุถุน ุงู„ุฑู…ุฒ ู„ู„ูƒูŠุงู†: - `B-` ูŠุดูŠุฑ ุฅู„ู‰ ุจุฏุงูŠุฉ ุงู„ูƒูŠุงู†. - `I-` ูŠุดูŠุฑ ุฅู„ู‰ ุฃู† ุงู„ุฑู…ุฒ ูŠู‚ุน ุถู…ู† ู†ูุณ ุงู„ูƒูŠุงู† (ุนู„ู‰ ุณุจูŠู„ ุงู„ู…ุซุงู„ุŒ ุงู„ุฑู…ุฒ `State` ู‡ูˆ ุฌุฒุก ู…ู† ูƒูŠุงู† ู…ุซู„ `Empire State Building`). - `0` ูŠุดูŠุฑ ุฅู„ู‰ ุฃู† ุงู„ุฑู…ุฒ ู„ุง ูŠู…ุซู„ ุฃูŠ ูƒูŠุงู†. 
## ุงู„ู…ุนุงู„ุฌุฉ ุงู„ู…ุณุจู‚ุฉ(Preprocess) <Youtube id="iY2AZYdZAr0"/> ุงู„ุฎุทูˆุฉ ุงู„ุชุงู„ูŠุฉ ู‡ูŠ ุชุญู…ูŠู„ ู…ูุฌุฒูู‘ุฆ ุงู„ู†ุตูˆุต DistilBERT ู„ู„ู…ุนุงู„ุฌุฉ ุงู„ู…ุณุจู‚ุฉ ู„ุญู‚ู„ `tokens`: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` ูƒู…ุง ุฑุฃูŠุช ููŠ ุญู‚ู„ `tokens` ุงู„ู…ุซุงู„ ุฃุนู„ุงู‡ุŒ ูŠุจุฏูˆ ุฃู† ุงู„ู…ุฏุฎู„ ู‚ุฏ ุชู… ุชุญู„ูŠู„ู‡ ุจุงู„ูุนู„. ู„ูƒู† ุงู„ู…ุฏุฎู„ ู„ู… ูŠูุฌุฒุฃ ุจุนุฏ ูˆูŠุชุนูŠู‘ู† ุนู„ูŠูƒ ุถุจุท `is_split_into_words=True` ู„ุชู‚ุณูŠู… ุงู„ูƒู„ู…ุงุช ุฅู„ู‰ ูƒู„ู…ุงุช ูุฑุนูŠุฉ. ุนู„ู‰ ุณุจูŠู„ ุงู„ู…ุซุงู„: ```py >>> example = wnut["train"][0] >>> tokenized_input = tokenizer(example["tokens"], is_split_into_words=True) >>> tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"]) >>> tokens ['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]'] ``` ูˆู…ุน ุฐู„ูƒุŒ ูŠุถูŠู ู‡ุฐุง ุจุนุถ ุงู„ุฑู…ูˆุฒ ุงู„ุฎุงุตุฉ `[CLS]` ูˆ`[SEP]` ูˆุชู‚ุณูŠู… ุงู„ูƒู„ู…ุงุช ุฅู„ู‰ ุฃุฌุฒุงุก ูŠูู†ุดุฆ ุนุฏู… ุชุทุงุจู‚ ุจูŠู† ุงู„ู…ูุฏุฎู„ุงุช ูˆุงู„ุชุณู…ูŠุงุช. ู‚ุฏ ูŠุชู… ุชู‚ุณูŠู… ูƒู„ู…ุฉ ูˆุงุญุฏุฉ ุชู‚ุงุจู„ ุชุณู…ูŠุฉ ูˆุงุญุฏุฉ ุงู„ุขู† ุฅู„ู‰ ูƒู„ู…ุชูŠู† ูุฑุนูŠุชูŠู†. ุณุชุญุชุงุฌ ุฅู„ู‰ ุฅุนุงุฏุฉ ู…ุญุงุฐุงุฉ ุงู„ุฑู…ูˆุฒ ูˆุงู„ุชุณู…ูŠุงุช ุนู† ุทุฑูŠู‚: 1. ุฑุจุท ูƒู„ ุฑู…ุฒ ุจุงู„ูƒู„ู…ุฉ ุงู„ุฃุตู„ูŠุฉ ุจุงุณุชุฎุฏุงู… ุงู„ุฎุงุตูŠุฉ [`word_ids`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.BatchEncoding.word_ids). 2. ุชุนูŠูŠู† ุงู„ุชุณู…ูŠุฉ `-100` ู„ู„ุฑู…ูˆุฒ ุงู„ุฎุงุตุฉ `[CLS]` ูˆ`[SEP]` ุจุญูŠุซ ูŠุชู… ุชุฌุงู‡ู„ู‡ุง ุจูˆุงุณุทุฉ ุฏุงู„ุฉ ุงู„ุฎุณุงุฑุฉ PyTorch (ุงู†ุธุฑ [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)). 3. 
ุชุณู…ูŠุฉ ุงู„ุฑู…ุฒ ุงู„ุฃูˆู„ ูู‚ุท ู„ูƒู„ู…ุฉ ู…ุนูŠู†ุฉ. ู‚ู… ุจุชุนูŠูŠู† `-100` ู„ุฃุฌุฒุงุก ุงู„ูƒู„ู…ุฉ ุงู„ุฃุฎุฑู‰. ู‡ู†ุง ูƒูŠู ูŠู…ูƒู†ูƒ ุฅู†ุดุงุก ูˆุธูŠูุฉ ู„ุฅุนุงุฏุฉ ู…ุญุงุฐุงุฉ ุงู„ุฑู…ูˆุฒ ูˆุงู„ุชุณู…ูŠุงุชุŒ ูˆู‚ุต ุงู„ุฌู…ู„ ู„ุชุชุฌุงูˆุฒ ุงู„ุญุฏ ุงู„ุฃู‚ุตู‰ ู„ุทูˆู„ ู…ูุฏุฎู„ุงุช DistilBERT: ```py >>> def tokenize_and_align_labels(examples): ... tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True) ... labels = [] ... for i, label in enumerate(examples[f"ner_tags"]): ... word_ids = tokenized_inputs.word_ids(batch_index=i) # ุชุนูŠูŠู† ุงู„ุฑู…ูˆุฒ ุฅู„ู‰ ูƒู„ู…ุงุชู‡ู… ุงู„ู…ู‚ุงุจู„ุฉ. ... previous_word_idx = None ... label_ids = [] ... for word_idx in word_ids: # ุชุนูŠูŠู† ุงู„ุฑู…ูˆุฒ ุงู„ุฎุงุตุฉ ุฅู„ู‰ -100. ... if word_idx is None: ... label_ids.append(-100) ... elif word_idx != previous_word_idx: # ุชุณู…ูŠุฉ ุงู„ุฑู…ุฒ ุงู„ุฃูˆู„ ูู‚ุท ู„ูƒู„ู…ุฉ ู…ุนูŠู†ุฉ. ... label_ids.append(label[word_idx]) ... else: ... label_ids.append(-100) ... previous_word_idx = word_idx ... labels.append(label_ids) ... tokenized_inputs["labels"] = labels ... return tokenized_inputs ``` ู„ุชุทุจูŠู‚ ู‡ุฐู‡ ุงู„ุนู…ู„ูŠุฉ ุนู„ู‰ ูƒุงู…ู„ ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุชุŒ ุงุณุชุฎุฏู… ุงู„ุฏุงู„ุฉ [`~datasets.Dataset.map`] ู„ู…ุฌู…ูˆุนุฉ ุจูŠุงู†ุงุช ๐Ÿค—. ูŠู…ูƒู†ูƒ ุชุณุฑูŠุน ุงู„ุฏุงู„ุฉ `map` ุนู† ุทุฑูŠู‚ ุชุนูŠูŠู† `batched=True` ู„ู…ุนุงู„ุฌุฉ ุนู†ุงุตุฑ ู…ุชุนุฏุฏุฉ ู…ู† ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุช ููŠ ูˆู‚ุช ูˆุงุญุฏ: ```py >>> tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True) ``` ุงู„ุขู† ู‚ู… ุจุฅู†ุดุงุก ุฏูุนุฉ ู…ู† ุงู„ุฃู…ุซู„ุฉ ุจุงุณุชุฎุฏุงู… [`DataCollatorWithPadding`].ู…ู† ุงู„ุฃูุถู„ ุงุณุชุฎุฏุงู… *ุงู„ุญุดูˆ ุงู„ุฏูŠู†ุงู…ูŠูƒูŠ* ู„ู„ุฌู…ู„ ุฅู„ู‰ ุฃุทูˆู„ ุทูˆู„ ููŠ ุฏูุนุฉ ุฃุซู†ุงุก ุงู„ุชุฌู…ูŠุนุŒ ุจุฏู„ุงู‹ ู…ู† ุญุดูˆ ู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุช ุจุงู„ูƒุงู…ู„ ุฅู„ู‰ ุงู„ุทูˆู„ ุงู„ุฃู‚ุตู‰. 
<frameworkcontent> <pt> ```py >>> from transformers import DataCollatorForTokenClassification >>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer) ``` </pt> <tf> ```py >>> from transformers import DataCollatorForTokenClassification >>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf") ``` </tf> </frameworkcontent> ## ุงู„ุชู‚ูŠูŠู…(Evaluate) ูŠูุนุฏู‘ ุชุถู…ูŠู† ู…ู‚ูŠุงุณ ุฃุซู†ุงุก ุงู„ุชุฏุฑูŠุจ ู…ููŠุฏู‹ุง ููŠ ุชู‚ูŠูŠู… ุฃุฏุงุก ู†ู…ูˆุฐุฌูƒ. ูŠู…ูƒู†ูƒ ุชุญู…ูŠู„ ุทุฑูŠู‚ุฉ ุชู‚ูŠูŠู… ุจุณุฑุนุฉ ู…ุน ู…ูƒุชุจุฉ ๐Ÿค— [Evaluate](https://huggingface.co/docs/evaluate/index). ู„ู‡ุฐู‡ ุงู„ู…ู‡ู…ุฉุŒ ู‚ู… ุจุชุญู…ูŠู„ ุฅุทุงุฑ [seqeval](https://huggingface.co/spaces/evaluate-metric/seqeval) (ุงู†ุธุฑ ุฌูˆู„ุฉ ๐Ÿค— Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) ู„ู…ุนุฑูุฉ ุงู„ู…ุฒูŠุฏ ุญูˆู„ ูƒูŠููŠุฉ ุชุญู…ูŠู„ ูˆุญุณุงุจ ู…ู‚ูŠุงุณ). ูŠูุฎุฑุฌ seqeval ุนุฏุฉ ู†ุชุงุฆุฌ: ุงู„ุฏู‚ุฉุŒ ูˆุงู„ุงุณุชุฐูƒุงุฑุŒ ูˆู…ู‚ูŠุงุณ F1ุŒ ูˆุงู„ุฏู‚ุฉ. ```py >>> import evaluate >>> seqeval = evaluate.load("seqeval") ``` ุงุญุตู„ ุนู„ู‰ ุชุณู…ูŠุงุช ุงู„ูƒูŠุงู†ุงุช ุงู„ู…ุณู…ุงุฉ (NER) ุฃูˆู„ุงู‹ุŒุซู… ุฃู†ุดุฆ ุฏุงู„ุฉ ุชูู…ุฑุฑ ุชู†ุจุคุงุชูƒ ูˆุชุณู…ูŠุงุชูƒ ุงู„ุตุญูŠุญุฉ ุฅู„ู‰ [`~evaluate.EvaluationModule.compute`] ู„ุญุณุงุจ ุงู„ู†ุชุงุฆุฌ: ```py >>> import numpy as np >>> labels = [label_list[i] for i in example[f"ner_tags"]] >>> def compute_metrics(p): ... predictions, labels = p ... predictions = np.argmax(predictions, axis=2) ... true_predictions = [ ... [label_list[p] for (p, l) in zip(prediction, label) if l != -100] ... for prediction, label in zip(predictions, labels) ... ] ... true_labels = [ ... [label_list[l] for (p, l) in zip(prediction, label) if l != -100] ... for prediction, label in zip(predictions, labels) ... ] ... results = seqeval.compute(predictions=true_predictions, references=true_labels) ... return { ... "precision": results["overall_precision"], ... 
"recall": results["overall_recall"], ... "f1": results["overall_f1"], ... "accuracy": results["overall_accuracy"], ... } ``` ุฏุงู„ุฉ `compute_metrics` ุฌุงู‡ุฒุฉ ู„ู„ุงุณุชุฎุฏุงู…ุŒ ูˆุณุชุญุชุงุฌ ุฅู„ูŠู‡ุง ุนู†ุฏ ุฅุนุฏุงุฏ ุงู„ุชุฏุฑูŠุจ. ## ุงู„ุชุฏุฑูŠุจ(Train) ู‚ุจู„ ุชุฏุฑูŠุจ ุงู„ู†ู…ูˆุฐุฌุŒ ุฌู‡ู‘ุฒ ุฎุฑูŠุทุฉ ุชุฑุจุท ุจูŠู† ุงู„ู…ุนุฑู‘ูุงุช ุงู„ู…ุชูˆู‚ุนุฉ ูˆุชุณู…ูŠุงุชู‡ุง ุจุงุณุชุฎุฏุงู… `id2label` ูˆ `label2id`: ```py >>> id2label = { ... 0: "O", ... 1: "B-corporation", ... 2: "I-corporation", ... 3: "B-creative-work", ... 4: "I-creative-work", ... 5: "B-group", ... 6: "I-group", ... 7: "B-location", ... 8: "I-location", ... 9: "B-person", ... 10: "I-person", ... 11: "B-product", ... 12: "I-product", ... } >>> label2id = { ... "O": 0, ... "B-corporation": 1, ... "I-corporation": 2, ... "B-creative-work": 3, ... "I-creative-work": 4, ... "B-group": 5, ... "I-group": 6, ... "B-location": 7, ... "I-location": 8, ... "B-person": 9, ... "I-person": 10, ... "B-product": 11, ... "I-product": 12, ... } ``` <frameworkcontent> <pt> <Tip> ุฅุฐุง ู„ู… ุชูƒู† ุนู„ู‰ ุฏุฑุงูŠุฉ ุจุชุนุฏูŠู„ ู†ู…ูˆุฐุฌ ุจุงุณุชุฎุฏุงู… [`Trainer`], ุฃู„ู‚ ู†ุธุฑุฉ ุนู„ู‰ ุงู„ุฏู„ูŠู„ ุงู„ุชุนู„ูŠู…ูŠ ุงู„ุฃุณุงุณูŠ [ู‡ู†ุง](../training#train-with-pytorch-trainer)! </Tip> ุฃู†ุช ู…ุณุชุนุฏ ุงู„ุขู† ู„ุจุฏุก ุชุฏุฑูŠุจ ู†ู…ูˆุฐุฌูƒ! ู‚ู… ุจุชุญู…ูŠู„ DistilBERT ู…ุน [`AutoModelForTokenClassification`] ุฅู„ู‰ ุฌุงู†ุจ ุนุฏุฏ ุงู„ุชุตู†ูŠูุงุช ุงู„ู…ุชูˆู‚ุนุฉุŒ ูˆุฎุฑูŠุทุฉ ุงู„ุชุณู…ูŠุงุช: ```py >>> from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer >>> model = AutoModelForTokenClassification.from_pretrained( ... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id ... ) ``` ููŠ ู‡ุฐู‡ ุงู„ู…ุฑุญู„ุฉุŒ ู‡ู†ุงูƒ ุซู„ุงุซ ุฎุทูˆุงุช ูู‚ุท ู…ุชุจู‚ูŠุฉ: 1. ุญุฏุฏ ู…ุนู„ู…ุงุช ุงู„ุชุฏุฑูŠุจ ุงู„ุฎุงุตุฉ ุจูƒ ููŠ [`TrainingArguments`]. 
ุงู„ู…ุนุงู…ู„ ุงู„ูˆุญูŠุฏ ุงู„ู…ุทู„ูˆุจ ู‡ูˆ `output_dir` ุงู„ุฐูŠ ูŠุญุฏุฏ ู…ูƒุงู† ุญูุธ ู†ู…ูˆุฐุฌูƒ. ุณุชู‚ูˆู… ุจุฏูุน ู‡ุฐุง ุงู„ู†ู…ูˆุฐุฌ ุฅู„ู‰ Hub ุนู† ุทุฑูŠู‚ ุชุนูŠูŠู† `push_to_hub=True` (ูŠุฌุจ ุฃู† ุชูƒูˆู† ู…ุณุฌู„ุงู‹ ุงู„ุฏุฎูˆู„ ุฅู„ู‰ Hugging Face ู„ุชุญู…ูŠู„ ู†ู…ูˆุฐุฌูƒ). ููŠ ู†ู‡ุงูŠุฉ ูƒู„ ุญู‚ุจุฉุŒ ุณูŠู‚ูˆู… [`Trainer`] ุจุชู‚ูŠูŠู… ุฏุฑุฌุงุช seqeval ูˆุญูุธ ุชุณุฎุฉ ุงู„ุชุฏุฑูŠุจ. 2. ู‚ู… ุจุชู…ุฑูŠุฑ ู…ุนุงู…ู„ุงุช ุงู„ุชุฏุฑูŠุจ ุฅู„ู‰ [`Trainer`] ุฅู„ู‰ ุฌุงู†ุจ ุงู„ู†ู…ูˆุฐุฌุŒ ูˆู…ุฌู…ูˆุนุฉ ุงู„ุจูŠุงู†ุงุชุŒ ูˆุงู„ู…ูุฌุฒูู‘ุฆ ุงู„ู„ุบูˆูŠุŒ ูˆ`data collator`ุŒ ูˆุฏุงู„ุฉ `compute_metrics`. 3.ุงุณุชุฏุนู [`~Trainer.train`] ู„ุชุฏุฑูŠุจ ู†ู…ูˆุฐุฌูƒ. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_wnut_model", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... num_train_epochs=2, ... weight_decay=0.01, ... eval_strategy="epoch", ... save_strategy="epoch", ... load_best_model_at_end=True, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_wnut["train"], ... eval_dataset=tokenized_wnut["test"], ... processing_class=tokenizer, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` ุจู…ุฌุฑุฏ ุงูƒุชู…ุงู„ ุงู„ุชุฏุฑูŠุจุŒ ุดุงุฑูƒ ู†ู…ูˆุฐุฌูƒ ุนู„ู‰ Hub ุจุงุณุชุฎุฏุงู… ุทุฑูŠู‚ุฉ [`~transformers.Trainer.push_to_hub`] ุญุชู‰ ูŠุชู…ูƒู† ุงู„ุฌู…ูŠุน ู…ู† ุงุณุชุฎุฏุงู… ู†ู…ูˆุฐุฌูƒ: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> ุฅุฐุง ู„ู… ุชูƒู† ุนู„ู‰ ุฏุฑุงูŠุฉ ุจุชุนุฏูŠู„ ู†ู…ูˆุฐุฌ ุจุงุณุชุฎุฏุงู… KerasุŒ ุฃู„ู‚ ู†ุธุฑุฉ ุนู„ู‰ ุงู„ุฏู„ูŠู„ ุงู„ุชุนู„ูŠู…ูŠ ุงู„ุฃุณุงุณูŠ [ู‡ู†ุง](../training#train-a-tensorflow-model-with-keras)! 
</Tip> ู„ู„ุชุนุฏูŠู„ ุนู„ู‰ ู†ู…ูˆุฐุฌ ููŠ TensorFlowุŒ ุงุจุฏุฃ ุจุฅุนุฏุงุฏ ุฏุงู„ุฉ ู…ุญุณู†ุŒ ูˆุฌุฏูˆู„ ู…ุนุฏู„ ุงู„ุชุนู„ู…ุŒ ูˆุจุนุถ ู…ุนู„ู…ุงุช ุงู„ุชุฏุฑูŠุจ: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_train_epochs = 3 >>> num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs >>> optimizer, lr_schedule = create_optimizer( ... init_lr=2e-5, ... num_train_steps=num_train_steps, ... weight_decay_rate=0.01, ... num_warmup_steps=0, ... ) ``` ุซู… ูŠู…ูƒู†ูƒ ุชุญู…ูŠู„ DistilBERT ู…ุน [`TFAutoModelForTokenClassification`] ุฅู„ู‰ ุฌุงู†ุจ ุนุฏุฏ ุงู„ุชุณู…ูŠุงุช ุงู„ู…ุชูˆู‚ุนุฉุŒ ูˆุชุฎุทูŠุทุงุช ุงู„ุชุณู…ูŠุงุช: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained( ... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id ... ) ``` ู‚ู… ุจุชุญูˆูŠู„ ู…ุฌู…ูˆุนุงุช ุจูŠุงู†ุงุชูƒ ุฅู„ู‰ ุชู†ุณูŠู‚ `tf.data.Dataset` ู…ุน [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_wnut["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_validation_set = model.prepare_tf_dataset( ... tokenized_wnut["validation"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` ู‡ูŠู‘ุฆ ุงู„ู†ู…ูˆุฐุฌ ู„ู„ุชุฏุฑูŠุจ ุจุงุณุชุฎุฏุงู… [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). ู„ุงุญุธ ุฃู† ู†ู…ุงุฐุฌ Transformers ุชุชุถู…ู† ุฏุงู„ุฉ ุฎุณุงุฑุฉ ุงูุชุฑุงุถูŠุฉ ู…ุฑุชุจุทุฉ ุจุงู„ู…ู‡ู…ุฉุŒ ู„ุฐู„ูƒ ู„ุง ุชุญุชุงุฌ ุฅู„ู‰ ุชุญุฏูŠุฏ ูˆุงุญุฏุฉ ุฅู„ุง ุฅุฐุง ูƒู†ุช ุชุฑุบุจ ููŠ ุฐู„ูƒ: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # No loss argument! 
``` ุขุฎุฑ ุฃู…ุฑูŠู† ูŠุฌุจ ุฅุนุฏุงุฏู‡ู…ุง ู‚ุจู„ ุจุฏุก ุงู„ุชุฏุฑูŠุจ ู‡ูˆ ุญุณุงุจ ุฏุฑุฌุงุช seqeval ู…ู† ุงู„ุชู†ุจุคุงุชุŒ ูˆุชูˆููŠุฑ ุทุฑูŠู‚ุฉ ู„ุฏูุน ู†ู…ูˆุฐุฌูƒ ุฅู„ู‰ Hub. ูŠุชู… ุฐู„ูƒ ุจุงุณุชุฎุฏุงู… [Keras callbacks](../main_classes/keras_callbacks). ู…ุฑุฑ ุฏุงู„ุฉ `compute_metrics` ุงู„ุฎุงุตุฉ ุจูƒ ุฅู„ู‰ [`~transformers.KerasMetricCallback`]: ```py >>> from transformers.keras_callbacks import KerasMetricCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set) ``` ุญุฏุฏ ู…ูƒุงู† ุฏูุน ู†ู…ูˆุฐุฌูƒ ูˆุงู„ู…ุญู„ู„ ุงู„ู„ุบูˆูŠ ููŠ [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="my_awesome_wnut_model", ... tokenizer=tokenizer, ... ) ``` ุซู… ุฌู…ู‘ุน callbacks ุงู„ุฎุงุตุฉ ุจูƒ ู…ุนู‹ุง: ```py >>> callbacks = [metric_callback, push_to_hub_callback] ``` ุฃุฎูŠุฑู‹ุงุŒ ุฃู†ุช ุฌุงู‡ุฒ ุงู„ุขู† ู„ุจุฏุก ุชุฏุฑูŠุจ ู†ู…ูˆุฐุฌูƒ! ู‚ู… ุจุงุณุชุฏุนุงุก [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) ู…ุน ุจูŠุงู†ุงุช ุงู„ุชุฏุฑูŠุจ ูˆุงู„ุชุญู‚ู‚ุŒ ูˆุนุฏุฏ ุงู„ุญู‚ุจุงุชุŒ ูˆcallbacks ู„ุชุนุฏูŠู„ ุงู„ู†ู…ูˆุฐุฌ: ```py >>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks) ``` ุจู…ุฌุฑุฏ ุงูƒุชู…ุงู„ ุงู„ุชุฏุฑูŠุจุŒ ูŠุชู… ุชุญู…ูŠู„ ู†ู…ูˆุฐุฌูƒ ุชู„ู‚ุงุฆูŠู‹ุง ุฅู„ู‰ Hub ุญุชู‰ ูŠุชู…ูƒู† ุงู„ุฌู…ูŠุน ู…ู† ุงุณุชุฎุฏุงู…ู‡! </tf> </frameworkcontent> <Tip> ู„ู„ุญุตูˆู„ ุนู„ู‰ ู…ุซุงู„ ุฃูƒุซุฑ ุชูุตูŠู„ุงู‹ ุญูˆู„ ูƒูŠููŠุฉ ุชุนุฏูŠู„ ู†ู…ูˆุฐุฌ ู„ุชุตู†ูŠู ุงู„ุฑู…ูˆุฒุŒ ุฃู„ู‚ ู†ุธุฑุฉ ุนู„ู‰ ุงู„ุฏูุชุฑ ุงู„ู…ู‚ุงุจู„ [ุฏูุชุฑ PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb) ุฃูˆ [ุฏูุชุฑ TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). 
</Tip> ## ุงู„ุงุณุชุฏู„ุงู„(Inference) ุฑุงุฆุนุŒ ุงู„ุขู† ุจุนุฏ ุฃู† ู‚ู…ุช ุจุชุนุฏูŠู„ ู†ู…ูˆุฐุฌุŒ ูŠู…ูƒู†ูƒ ุงุณุชุฎุฏุงู…ู‡ ู„ู„ุงุณุชุฏู„ุงู„! ุงุญุตู„ ุนู„ู‰ ุจุนุถ ุงู„ู†ุตูˆุต ุงู„ุชูŠ ุชุฑูŠุฏ ุชุดุบูŠู„ ุงู„ุงุณุชุฏู„ุงู„ ุนู„ูŠู‡ุง: ```py >>> text = "The Golden State Warriors are an American professional basketball team based in San Francisco." ``` ุฃุจุณุท ุทุฑูŠู‚ุฉ ู„ุชุฌุฑุจุฉ ู†ู…ูˆุฐุฌูƒ ุงู„ู…ูุฏุฑุจ ู…ุณุจู‚ู‹ุง ู„ู„ุงุณุชุฏู„ุงู„ ู‡ูŠ ุงุณุชุฎุฏุงู…ู‡ ููŠ [`pipeline`]. ู‚ู… ุจุชู†ููŠุฐ `pipeline` ู„ุชุตู†ูŠู ุงู„ูƒูŠุงู†ุงุช ุงู„ู…ุณู…ุงุฉ ู…ุน ู†ู…ูˆุฐุฌูƒุŒ ูˆู…ุฑุฑ ู†ุตูƒ ุฅู„ูŠู‡: ```py >>> from transformers import pipeline >>> classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model") >>> classifier(text) [{'entity': 'B-location', 'score': 0.42658573, 'index': 2, 'word': 'golden', 'start': 4, 'end': 10}, {'entity': 'I-location', 'score': 0.35856336, 'index': 3, 'word': 'state', 'start': 11, 'end': 16}, {'entity': 'B-group', 'score': 0.3064001, 'index': 4, 'word': 'warriors', 'start': 17, 'end': 25}, {'entity': 'B-location', 'score': 0.65523505, 'index': 13, 'word': 'san', 'start': 80, 'end': 83}, {'entity': 'B-location', 'score': 0.4668663, 'index': 14, 'word': 'francisco', 'start': 84, 'end': 93}] ``` ูŠู…ูƒู†ูƒ ุฃูŠุถู‹ุง ุชูƒุฑุงุฑ ู†ุชุงุฆุฌ `pipeline` ูŠุฏูˆูŠู‹ุง ุฅุฐุง ุฃุฑุฏุช: <frameworkcontent> <pt> ู‚ุณู‘ู… ุงู„ู†ุต ุฅู„ู‰ ุฑู…ูˆุฒ ูˆุฃุฑุฌุน ุงู„ู…ููˆุชู‘ุฑุงุช ุจู„ุบุฉ PyTorch: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model") >>> inputs = tokenizer(text, return_tensors="pt") ``` ู…ุฑุฑ ู…ุฏุฎู„ุงุชูƒ ุฅู„ู‰ ุงู„ู†ู…ูˆุฐุฌ ูˆุงุญุตู„ ุนู„ู‰ `logits`: ```py >>> from transformers import AutoModelForTokenClassification >>> model = AutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits ``` ุงุณุชุฎุฑุฌ ุงู„ูุฆุฉ ุฐุงุช ุงู„ุงุญุชู…ุงู„ูŠุฉ ุงู„ุฃุนู„ู‰ุŒ ูˆุงุณุชุฎุฏู… ุฌุฏูˆู„ `id2label` ุงู„ุฎุงุตุฉ ุจุงู„ู†ู…ูˆุฐุฌ ู„ุชุญูˆูŠู„ู‡ุง ุฅู„ู‰ ุชุณู…ูŠุฉ ู†ุตูŠุฉ: ```py >>> predictions = torch.argmax(logits, dim=2) >>> predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]] >>> predicted_token_class ['O', 'O', 'B-location', 'I-location', 'B-group', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-location', 'B-location', 'O', 'O'] ``` </pt> <tf> ู‚ุณู‘ู… ุงู„ู†ุต ุฅู„ู‰ ุฑู…ูˆุฒ ูˆุฃุฑุฌุน ุงู„ู…ููˆุชู‘ุฑุงุช ุจ TensorFlow: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model") >>> inputs = tokenizer(text, return_tensors="tf") ``` ู…ุฑุฑ ู…ุฏุฎู„ุงุชูƒ ุฅู„ู‰ ุงู„ู†ู…ูˆุฐุฌ ูˆุงุญุตู„ ุนู„ู‰ `logits`: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model") >>> logits = model(**inputs).logits ``` ุงุณุชุฎุฑุฌ ุงู„ูุฆุฉ ุฐุงุช ุงู„ุงุญุชู…ุงู„ูŠุฉ ุงู„ุฃุนู„ู‰ุŒ ูˆุงุณุชุฎุฏู… ุฌุฏูˆู„ `id2label` ุงู„ุฎุงุตุฉ ุจุงู„ู†ู…ูˆุฐุฌ ู„ุชุญูˆูŠู„ู‡ุง ุฅู„ู‰ ุชุณู…ูŠุฉ ู†ุตูŠุฉ: ```py >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1) >>> predicted_token_class = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()] >>> predicted_token_class ['O', 'O', 'B-location', 'I-location', 'B-group', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-location', 'B-location', 'O', 'O'] ``` </tf> </frameworkcontent>
transformers/docs/source/ar/tasks/token_classification.md/0
{ "file_path": "transformers/docs/source/ar/tasks/token_classification.md", "repo_id": "transformers", "token_count": 10300 }
381
<!--- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Zu ๐Ÿค— Transformers beitragen Jeder ist willkommen, einen Beitrag zu leisten, und wir schรคtzen den Beitrag jedes Einzelnen. Codebeitrรคge sind nicht der einzige Weg, der Community zu helfen. Fragen zu beantworten, anderen zu helfen und die Dokumentation zu verbessern, sind ebenfalls รคuรŸerst wertvoll. Es hilft uns auch, wenn Sie das Projekt weiterempfehlen! Erwรคhnen Sie die Bibliothek in Blogposts รผber die groรŸartigen Projekte, die sie ermรถglicht hat, tweeten Sie, wenn sie Ihnen geholfen hat, oder hinterlassen Sie dem Repository ein โญ๏ธ, um Danke zu sagen. Wie auch immer Sie sich entscheiden beizutragen, seien Sie achtsam und respektieren Sie unseren [Verhaltenskodex](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md). **Dieser Leitfaden wurde stark durch den fantastischen [scikit-learn-Leitfaden fรผr Beitrรคge](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md) inspiriert.** ## Beitragsmรถglichkeiten Es gibt mehrere Wege, wie Sie zu ๐Ÿค— Transformers beitragen kรถnnen: * Beheben Sie bestehende Probleme im vorhandenen Code. * Erstellen Sie Issues im Zusammenhang mit Fehlern oder gewรผnschten neuen Funktionen. * Implementieren Sie neue Modelle. * Tragen Sie zu den Beispielen oder zur Dokumentation bei. 
Wenn Sie nicht wissen, wo Sie anfangen sollen, gibt es eine spezielle Liste von [Good First Issues](https://github.com/huggingface/transformers/contribute). Sie bietet Ihnen eine Liste offener und anfรคngerfreundlicher Probleme und hilft Ihnen, einen ersten Beitrag zu Open-Source zu leisten. Idealerweise erstellen Sie eine Pull-Anfrage und verlinken sie mit dem Issue, an dem Sie arbeiten mรถchten. Wir versuchen, erstellte PRs bevorzugt zu behandeln, da wir so den Fortschritt leicht verfolgen kรถnnen, und die Option besteht, dass jemand anderes den PR รผbernehmen kann, falls der Beitragende keine Zeit mehr hat. Fรผr etwas mehr Herausforderung, kรถnnen Sie auch einen Blick auf die Liste der [Good Second Issues](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) werfen. Generell gilt: Legen Sie los, wenn Sie sich den Anforderungen gewachsen sehen und wir helfen Ihnen dabei! ๐Ÿš€ > Alle Beitrรคge sind fรผr die Community gleichermaรŸen wertvoll. ๐Ÿฅฐ ## Bestehende Probleme beheben Wenn Ihnen ein Problem im vorhandenen Code auffรคllt und Sie eine Lรถsung im Sinn haben, kรถnnen Sie gerne einen Beitrag leisten und [eine Pull-Anfrage erstellen](#eine-pull-anfrage-erstellen)! ## Ein fehlerspezifisches Issue oder eine Feature-Anfrage erstellen Tun Sie Ihr Bestes, diesen Richtlinien zu folgen, wenn Sie ein fehlerspezifisches Issue erstellen oder eine Feature-Anfrage einreichen. Das macht es uns leichter, Ihnen schnell und mit gutem Feedback zu antworten. ### Haben Sie einen Fehler gefunden? Die ๐Ÿค— Transformers-Bibliothek verdankt ihre Robustheit und Zuverlรคssigkeit aller Nutzer, die frisch entdeckte Probleme melden. Wir wรผrden es wirklich schรคtzen, wenn Sie **sicherstellen kรถnnten, dass der Fehler noch nicht gemeldet wurde** (verwenden Sie die Suchleiste auf GitHub unter Issues), bevor Sie ein Issue erstellen. Ihr Problem sollte sich auch auf Fehler in der Bibliothek selbst und nicht auf Ihren eigenen Code beziehen. 
Wenn Sie sich nicht sicher sind, ob der Fehler in Ihrem eigenen Code oder der Bibliothek liegt, fragen Sie bitte zuerst im [Forum](https://discuss.huggingface.co/) nach. Das hilft uns, schneller auf Probleme im Zusammenhang mit der Bibliothek zu reagieren, anstatt auf allgemeine Fragen. Wenn Sie sich vergewissert haben, dass der Fehler noch nicht gemeldet wurde, geben Sie bitte die folgenden Informationen in Ihrem Issue an, damit wir es schnell beheben kรถnnen: * Ihr **Betriebssystem und Version** sowie die Versionen von **Python**, **PyTorch** und **TensorFlow**, falls zutreffend. * Ein kurzes und unabhรคngiges Code-Snippet, das es uns ermรถglicht, den Fehler in weniger als 30 Sekunden nachzustellen. * Den *vollstรคndigen* Traceback, wenn eine Ausnahme geworfen wird. * Fรผgen Sie weitere hilfreiche Informationen, wie z. B. Screenshots, an. Um das Betriebssystem und die Softwareversionen automatisch auszugeben, fรผhren Sie den folgenden Befehl aus: ```bash transformers env ``` Sie kรถnnen denselben Befehl auch im Hauptverzeichnis des Repositorys ausfรผhren: ```bash python src/transformers/commands/transformers_cli.py env ``` ### Mรถchten Sie eine neue Funktion? Wenn Sie eine bestimmte neue Funktion in ๐Ÿค— Transformers sehen mรถchten, erstellen Sie bitte ein Issue und fรผgen Sie eine Beschreibung hinzu: 1. Was ist die *Motivation* hinter dieser Funktion? Steht sie in Zusammenhang mit einem Problem oder einer Frustration mit der Bibliothek? Ist es eine Funktion, die Sie fรผr ein Projekt benรถtigen? Ist es etwas, an dem Sie gearbeitet haben und denken, dass es der Community nutzen kรถnnte? Was auch immer es ist, wir wรผrden uns freuen, davon zu hรถren! 1. Beschreiben Sie Ihre gewรผnschte Funktion so detailliert wie mรถglich. Je mehr Sie uns darรผber erzรคhlen kรถnnen, desto besser kรถnnen wir Ihnen helfen. 1. Stellen Sie einen *Code-Schnipsel* bereit, der die Funktionsweise demonstriert. 1. Falls die Funktion auf einem Paper beruht, verlinken Sie dieses bitte. 
Wenn Ihr Issue gut geschrieben ist, sind wir zum Zeitpunkt seiner Erstellung bereits zu 80 % fertig. Wir haben [Vorlagen](https://github.com/huggingface/transformers/tree/main/templates) hinzugefรผgt, um Ihnen den Start Ihres Issues zu erleichtern. ## Mรถchten Sie ein neues Modell implementieren? Es werden stรคndig neue Modelle verรถffentlicht. Wenn Sie ein neues Modell implementieren mรถchten, geben Sie bitte folgende Informationen an: * Eine kurze Beschreibung des Modells und einen Link zum Paper. * Link zur Implementierung, falls sie Open-Source ist. * Link zu den Modellgewichten, falls verfรผgbar. Lassen Sie es uns wissen, wenn Sie bereit sind, das Modell selbst beizutragen. Dann kรถnnen wir Ihnen helfen, es zu ๐Ÿค— Transformers hinzuzufรผgen! Wir haben auch einen technischen Leitfaden dazu, [wie man ein Modell zu ๐Ÿค— Transformers hinzufรผgt](https://huggingface.co/docs/transformers/add_new_model). ## Mรถchten Sie die Dokumentation erweitern? Wir sind immer auf der Suche nach Verbesserungen, die die Dokumentation klarer und prรคziser machen. Bitte teilen Sie uns Verbesserungsvorschlรคge mit, wie z. B. Tippfehler und fehlende, unklare oder ungenaue Inhalte. Wir รผbernehmen gerne die ร„nderungen oder helfen Ihnen, einen Beitrag zu leisten, wenn Sie daran interessiert sind! Fรผr weitere Einzelheiten darรผber, wie man die Dokumentation generiert, erstellt und schreibt, werfen Sie einen Blick auf das [README](https://github.com/huggingface/transformers/tree/main/docs) der Dokumentation. ## Eine Pull-Anfrage erstellen Bevor Sie irgendwelchen Code schreiben, empfehlen wir Ihnen dringend, die bestehenden PRs oder Issues zu durchsuchen, um sicherzustellen, dass niemand bereits an diesem Thema arbeitet. Wenn Sie sich unsicher sind, ist es immer eine gute Idee, nach Feedback in einem neuen Issue zu fragen. Sie benรถtigen grundlegende `git`-Kenntnisse, um zu ๐Ÿค— Transformers beizutragen. Obwohl `git` nicht das einfachste Werkzeug ist, hat es ein sehr gutes Handbuch. 
Geben Sie `git --help` in eine Shell ein und genieรŸen Sie es! Wenn Sie Bรผcher bevorzugen, ist [Pro Git](https://git-scm.com/book/en/v2) eine gute Anlaufstelle. Sie benรถtigen **[Python 3.9](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** oder hรถher, um zu ๐Ÿค— Transformers beizutragen. Folgen Sie den nachstehenden Schritten, um mit dem Beitrag zu beginnen: 1. Forken Sie das [Repository](https://github.com/huggingface/transformers), indem Sie auf den **[Fork](https://github.com/huggingface/transformers/fork)**-Button auf der Seite des Repositorys klicken. Dadurch wird eine Kopie des Codes auf Ihrem GitHub-Account erstellt. 1. Klonen Sie Ihren Fork auf Ihre lokale Festplatte und fรผgen Sie das ursprรผngliche Repository als Remote hinzu: ```bash git clone git@github.com:<your Github handle>/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 1. Erstellen Sie einen neuen Branch, um Ihre ร„nderungen zu speichern: ```bash git checkout -b a-descriptive-name-for-my-changes ``` ๐Ÿšจ Arbeiten Sie **nicht** auf dem `main` Branch! 1. Richten Sie eine Entwicklungsumgebung ein, indem Sie den folgenden Befehl in einer virtuellen Umgebung ausfรผhren: ```bash pip install -e ".[dev]" ``` Wenn ๐Ÿค— Transformers bereits in der virtuellen Umgebung installiert war, entfernen Sie es mit `pip uninstall transformers`, bevor Sie es im bearbeitbaren Modus mit dem `-e` Flag neu installieren. Abhรคngig von Ihrem Betriebssystem und durch die wachsende Anzahl der optionalen Abhรคngigkeiten von Transformers kรถnnten Sie mit diesem Befehl einen Fehler verursachen. Wenn das der Fall ist, stellen Sie sicher, dass Sie ihr bevorzugtes Deep-Learning-Framework (PyTorch, TensorFlow und/oder Flax) installieren und anschlieรŸend den folgenden Befehl ausfรผhren: ```bash pip install -e ".[quality]" ``` Dies sollte fรผr die meisten Anwendungsfรคlle ausreichend sein. 1. Entwickeln Sie die Funktionen in Ihrem Branch. 
Wรคhrend Sie an Ihrem Code arbeiten, sollten Sie sicherstellen, dass die Test-Suite erfolgreich durchlรคuft. Fรผhren Sie die von Ihren ร„nderungen betroffenen Tests wie folgt aus: ```bash pytest tests/<TEST_TO_RUN>.py ``` Weitere Informationen รผber Tests finden Sie in der Anleitung zum Thema [Testen](https://huggingface.co/docs/transformers/testing). ๐Ÿค— Transformers stรผtzt sich auf `black` und `ruff`, um seinen Quellcode konsistent zu formatieren. Nachdem Sie ร„nderungen vorgenommen haben, wenden Sie automatische Stilkorrekturen und Codeprรผfungen, die nicht automatisiert werden kรถnnen, in einem Schritt an: ```bash make fixup ``` Dieser Task ist optimiert, nur mit Dateien zu arbeiten, die von Ihrer PR modifiziert wurden. Wenn Sie die Prรผfungen nacheinander ausfรผhren mรถchten, wendet der folgende Befehl die Stilkorrekturen an: ```bash make style ``` ๐Ÿค— Transformers verwendet auch `ruff` und einige benutzerdefinierte Skripte, um auf Programmierfehler zu prรผfen. Qualitรคtskontrollen werden von der CI durchgefรผhrt, aber Sie kรถnnen die gleichen รœberprรผfungen auch selbst ausfรผhren: ```bash make quality ``` AbschlieรŸend haben wir viele Skripte, die sicherstellen, dass wir alle betroffenen Dateien aktualisieren, wenn wir ein neues Modell hinzufรผgen. Sie kรถnnen diese wie folgt ausfรผhren: ```bash make repo-consistency ``` Um mehr รผber diese Prรผfungen zu erfahren und wie man mit ihnen Probleme behebt, lesen Sie den Leitfaden zu [รœberprรผfungen bei einer Pull-Anfrage](https://huggingface.co/docs/transformers/pr_checks). Wenn Sie Dokumente im Verzeichnis `docs/source` รคndern, stellen Sie sicher, dass die Dokumentation noch generiert werden kann. Diese Prรผfung wird auch im CI laufen, wenn Sie eine Pull-Anfrage erstellen. 
Um eine lokale Prรผfung durchzufรผhren, mรผssen Sie den Dokumentations-Builder installieren: ```bash pip install ".[docs]" ``` Fรผhren Sie den folgenden Befehl im Hauptverzeichnis des Repositorys aus: ```bash doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build ``` Dadurch wird die Dokumentation im Ordner `~/tmp/test-build` erstellt, wo Sie die erzeugten Markdown-Dateien mit Ihrem bevorzugten Editor รผberprรผfen kรถnnen. Sie kรถnnen auch eine Vorschau der Dokumentation auf GitHub sehen, wenn Sie eine Pull-Anfrage รถffnen. Wenn Sie mit Ihren ร„nderungen zufrieden sind, fรผgen Sie die geรคnderten Dateien mit `git add` hinzu und speichern Sie Ihre ร„nderungen lokal mit `git commit`: ```bash git add modified_file.py git commit ``` Bitte achten Sie darauf, [gute Commit-Nachrichten](https://chris.beams.io/posts/git-commit/) zu schreiben, um die von Ihnen vorgenommenen ร„nderungen klar zu kommunizieren! Um Ihre Kopie des Codes auf dem aktuellen Stand des ursprรผnglichen Repositorys zu halten, rebasen Sie Ihren Branch auf `upstream/branch` *bevor* Sie eine Pull-Anfrage รถffnen oder falls Sie von einem Maintainer dazu aufgefordert werden: ```bash git fetch upstream git rebase upstream/main ``` Pushen Sie Ihre ร„nderungen in Ihrem Branch: ```bash git push -u origin a-descriptive-name-for-my-changes ``` Wenn Sie bereits eine Pull-Anfrage erstellt haben, mรผssen Sie den Push mit dem `--force` Flag erzwingen. Andernfalls, wenn die Pull-Anfrage noch nicht erstellt wurde, kรถnnen Sie Ihre ร„nderungen normal pushen. 1. Jetzt kรถnnen Sie zu Ihrem Fork des Repositorys auf GitHub gehen und auf **Pull-Anfrage** klicken, um eine Pull-Anfrage zu erstellen. Stellen Sie sicher, dass Sie alle Punkte auf unserer [Checkliste](#checkliste-fรผr-pull-anfragen) unten abhaken. Wenn Sie fertig sind, kรถnnen Sie Ihre ร„nderungen zur รœberprรผfung an die Projektverantwortlichen senden. 1. 
Es ist kein Problem, wenn die Maintainer ร„nderungen beantragen, das geschieht auch bei unseren Kernmitarbeitern! Damit jeder die ร„nderungen in der Pull-Anfrage sehen kann, arbeiten Sie in Ihrem lokalen Branch und pushen die ร„nderungen zu Ihrem Fork. Sie werden automatisch in der Pull-Anfrage erscheinen. ### Checkliste fรผr Pull-Anfragen โ˜ Der Titel der Pull-Anfrage sollte Ihren Beitrag zusammenfassen.<br> โ˜ Wenn Ihre Pull-Anfrage ein bestimmtes Issue bearbeitet, erwรคhnen Sie bitte die zugehรถrige Nummer in der Beschreibung der Pull-Anfrage, sodass diese verlinkt sind (und Personen, die das Issue lesen, wissen, dass Sie daran arbeiten).<br> โ˜ Um eine fortlaufende Bearbeitung anzuzeigen, versehen Sie bitte den Titel mit einem `[WIP]` Prรคfix. Diese sind nรผtzlich, um doppelte Arbeit zu verhindern und sie von PRs abzuheben, die bereit zum Zusammenfรผhren sind.<br> โ˜ Stellen Sie sicher, dass existierende Tests bestanden werden.<br> โ˜ Wenn Sie eine neue Funktion hinzufรผgen, erstellen Sie auch Tests dafรผr.<br> * Wenn Sie ein neues Modell hinzufรผgen, stellen Sie sicher, dass Sie `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)` verwenden, um die gemeinsamen Tests auszulรถsen. * Wenn Sie neue `@slow` Tests hinzufรผgen, stellen Sie mit `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py` sicher, dass diese erfolgreich durchlaufen. * Wenn Sie einen neuen Tokenizer hinzufรผgen, schreiben Sie Tests und stellen Sie mit `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py` sicher, dass diese erfolgreich durchlaufen. 
* CircleCI fรผhrt die langsamen Tests nicht aus, aber GitHub Actions tut dies jede Nacht!<br> โ˜ Alle public Methoden mรผssen informative Docstrings haben (siehe [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) als Beispiel).<br> โ˜ Aufgrund des schnell wachsenden Repositorys fรผgen Sie bitte keine Bilder, Videos oder andere Nicht-Textdateien hinzu, die das Repository erheblich belasten wรผrden. Verwenden Sie stattdessen ein Hub-Repository wie [`hf-internal-testing`](https://huggingface.co/hf-internal-testing), um diese Dateien zu hosten und sie per URL zu verlinken. Wir empfehlen Bilder, die zur Dokumentation gehรถren, im folgenden Repository abzulegen: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). Sie kรถnnen eine PR in diesem Datasets-Repository erstellen und ein Hugging-Face-Mitglied bitten, sie zu mergen. Um mehr รผber die Prรผfungen zu erfahren, die bei einer Pull-Anfrage ausgelรถst werden, lesen Sie unseren Leitfaden zu [รœberprรผfungen bei einer Pull-Anfrage](https://huggingface.co/docs/transformers/pr_checks). ### Tests Eine umfangreiche Test-Suite ist enthalten, um das Verhalten der Bibliothek und mehrerer Beispiele zu testen. Tests fรผr die Bibliothek und Beispiele finden Sie jeweils im [tests](https://github.com/huggingface/transformers/tree/main/tests) und im [examples](https://github.com/huggingface/transformers/tree/main/examples) Ordner. Wir bevorzugen `pytest` und `pytest-xdist`, weil es schneller ist. Geben Sie einen *Pfad zu einem Unterordner oder einer Testdatei* vom Hauptverzeichnis des Repositorys aus an, um den Test auszufรผhren: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model ``` Analog fรผr den `examples` Ordner, geben Sie einen *Pfad zu einem Unterordner oder einer Testdatei* an, um den Test auszufรผhren. Z. B. 
fรผhrt der folgende Befehl den Test des Unterordners fรผr Textklassifizierung im PyTorch `examples` Ordner durch: ```bash pip install -r examples/xxx/requirements.txt # nur beim ersten Mal erforderlich python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` Tatsรคchlich ist dies genau, wie unsere `make test` und `make test-examples` Befehle implementiert sind (abgesehen von `pip install`)! Sie kรถnnen auch eine kleinere Anzahl an Tests angeben, um nur die Funktion, an der Sie arbeiten, zu testen. StandardmรครŸig werden langsame Tests รผbersprungen, aber Sie kรถnnen die Umgebungsvariable `RUN_SLOW` auf `yes` setzen, um sie auszufรผhren. Dies wird den Download vieler Gigabyte an Modellen starten - stellen Sie also sicher, dass Sie sowohl genรผgend Festplattenspeicher als auch eine gute Internetverbindung oder die nรถtige Geduld haben! <Tip warning={true}> Vergessen Sie nicht, einen *Pfad zu einem Unterordner oder einer Testdatei* anzugeben, um den Test auszufรผhren. Sonst fรผhren Sie alle Tests im `tests` oder `examples` Ordner aus, was sehr lange dauern wird! </Tip> ```bash RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` Wie bei den langsamen Tests gibt es auch andere Umgebungsvariablen, die standardmรครŸig beim Testen nicht gesetzt sind: * `RUN_CUSTOM_TOKENIZERS`: Aktiviert Tests fรผr benutzerdefinierte Tokenizer. Weitere Umgebungsvariablen und zusรคtzliche Informationen finden Sie in der [testing_utils.py](src/transformers/testing_utils.py). ๐Ÿค— Transformers verwendet `pytest` nur als Test-Runner. Es verwendet keine `pytest`-spezifischen Funktionen in der Test-Suite selbst. Das bedeutet, `unittest` wird vollstรคndig unterstรผtzt. Folgend wird beschrieben, wie man Tests mit `unittest` ausfรผhrt: ```bash python -m unittest discover -s tests -t . 
-v python -m unittest discover -s examples -t examples -v ``` ### Stil-Leitfaden Fรผr Docstrings befolgt ๐Ÿค— Transformers den [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). Lesen Sie unseren [Leitfaden zum Schreiben von Dokumentationen](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification) fรผr weitere Informationen. ### Entwickeln unter Windows Unter Windows (falls Sie nicht im [Windows-Subsystem fรผr Linux](https://learn.microsoft.com/en-us/windows/wsl/) oder WSL arbeiten) mรผssen Sie git so konfigurieren, dass Windows `CRLF` in Linux `LF` Zeilenenden umgewandelt werden: ```bash git config core.autocrlf input ``` Eine Mรถglichkeit, den `make`-Befehl unter Windows auszufรผhren, ist mit MSYS2: 1. Laden Sie [MSYS2](https://www.msys2.org/) herunter und installieren Sie es nach `C:\msys64`. 1. ร–ffnen Sie die Kommandozeile `C:\msys64\msys2.exe` (sie sollte vom **Start**-Menรผ aus verfรผgbar sein). 1. Fรผhren Sie den Befehl in der Shell aus: `pacman -Syu` und installieren Sie `make` mit `pacman -S make`. 1. Fรผgen Sie `C:\msys64\usr\bin` an Ihrer PATH-Umgebungsvariable an. Sie kรถnnen nun `make` aus jedem Terminal heraus verwenden (PowerShell, cmd.exe usw.)! ๐ŸŽ‰ ### Ein geforktes Repository mit dem Haupt-Repository von Hugging Face synchronisieren Beim Aktualisieren des main-Branches eines geforkten Repositories beachten Sie bitte die folgenden Schritte, um das Anpingen des Haupt-Repositorys zu vermeiden, was unnรถtige Verweise in abhรคngigen PRs vermerkt und beteiligte Entwickler benachrichtigt: 1. Wenn mรถglich, vermeiden Sie die Synchronisation mit dem Haupt-Repository รผber einen Branch und PR im geforkten Repository. Mergen Sie stattdessen direkt in den main-Branch des Forks. 1. 
Wenn ein PR unbedingt notwendig ist, verwenden Sie die folgenden Schritte, nachdem Sie Ihren Branch ausgecheckt haben: ```bash git checkout -b your-branch-for-syncing git pull --squash --no-commit upstream main git commit -m '<your message without GitHub references>' git push --set-upstream origin your-branch-for-syncing ```
transformers/docs/source/de/contributing.md/0
{ "file_path": "transformers/docs/source/de/contributing.md", "repo_id": "transformers", "token_count": 8105 }
382
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Accelerate [Accelerate](https://hf.co/docs/accelerate/index) is a library designed to simplify distributed training on any type of setup with PyTorch by uniting the most common frameworks ([Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) and [DeepSpeed](https://www.deepspeed.ai/)) for it into a single interface. [`Trainer`] is powered by Accelerate under the hood, enabling loading big models and distributed training. This guide will show you two ways to use Accelerate with Transformers, using FSDP as the backend. The first method demonstrates distributed training with [`Trainer`], and the second method demonstrates adapting a PyTorch training loop. For more detailed information about Accelerate, please refer to the [documentation](https://hf.co/docs/accelerate/index). ```bash pip install accelerate ``` Start by running [accelerate config](https://hf.co/docs/accelerate/main/en/package_reference/cli#accelerate-config) in the command line to answer a series of prompts about your training system. This creates and saves a configuration file to help Accelerate correctly set up training based on your setup. 
```bash accelerate config ``` Depending on your setup and the answers you provide, an example configuration file for distributing training with FSDP on one machine with two GPUs may look like the following. ```yaml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: false fsdp_cpu_ram_efficient_loading: true fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ## Trainer Pass the path to the saved configuration file to [`TrainingArguments`], and from there, pass your [`TrainingArguments`] to [`Trainer`]. ```py from transformers import TrainingArguments, Trainer training_args = TrainingArguments( output_dir="your-model", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=2, fsdp_config="path/to/fsdp_config", fsdp="full_shard", weight_decay=0.01, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, push_to_hub=True, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], eval_dataset=dataset["test"], processing_class=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) trainer.train() ``` ## Native PyTorch Accelerate can also be added to any PyTorch training loop to enable distributed training. The [`~accelerate.Accelerator`] is the main entry point for adapting your PyTorch code to work with Accelerate. It automatically detects your distributed training setup and initializes all the necessary components for training. 
You don't need to explicitly place your model on a device because [`~accelerate.Accelerator`] knows which device to move your model to. ```py from accelerate import Accelerator accelerator = Accelerator() device = accelerator.device ``` All PyTorch objects (model, optimizer, scheduler, dataloaders) should be passed to the [`~accelerate.Accelerator.prepare`] method now. This method moves your model to the appropriate device or devices, adapts the optimizer and scheduler to use [`~accelerate.optimizer.AcceleratedOptimizer`] and [`~accelerate.scheduler.AcceleratedScheduler`], and creates a new shardable dataloader. ```py train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( train_dataloader, eval_dataloader, model, optimizer ) ``` Replace `loss.backward` in your training loop with Accelerates [`~accelerate.Accelerator.backward`] method to scale the gradients and determine the appropriate `backward` method to use depending on your framework (for example, DeepSpeed or Megatron). ```py for epoch in range(num_epochs): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` Combine everything into a function and make it callable as a script. ```py from accelerate import Accelerator def main(): accelerator = Accelerator() model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() if __name__ == "__main__": main() ``` From the command line, call [accelerate launch](https://hf.co/docs/accelerate/main/en/package_reference/cli#accelerate-launch) to run your training script. Any additional arguments or parameters can be passed here as well. 
To launch your training script on two GPUs, add the `--num_processes` argument. ```bash accelerate launch --num_processes=2 your_script.py ``` Refer to the [Launching Accelerate scripts](https://hf.co/docs/accelerate/main/en/basic_tutorials/launch) for more details.
transformers/docs/source/en/accelerate.md/0
{ "file_path": "transformers/docs/source/en/accelerate.md", "repo_id": "transformers", "token_count": 2050 }
383
# Using Cursor as a client of transformers serve This example shows how to use `transformers serve` as a local LLM provider for [Cursor](https://cursor.com/), the popular IDE. In this particular case, requests to `transformers serve` will come from an external IP (Cursor's server IPs), which requires some additional setup. Furthermore, some of Cursor's requests require [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS), which is disabled by default for security reasons. To launch a server with CORS enabled, run ```shell transformers serve --enable-cors ``` You'll also need to expose your server to external IPs. A potential solution is to use [`ngrok`](https://ngrok.com/), which has a permissive free tier. After setting up your `ngrok` account and authenticating on your server machine, you run ```shell ngrok http [port] ``` where `port` is the port used by `transformers serve` (`8000` by default). On the terminal where you launched `ngrok`, you'll see an https address in the "Forwarding" row, as in the image below. This is the address to send requests to. <h3 align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers_serve_ngrok.png"/> </h3> You're now ready to set things up on the app side! In Cursor, while you can't set a new provider, you can change the endpoint for OpenAI requests in the model selection settings. First, navigate to "Settings" > "Cursor Settings", "Models" tab, and expand the "API Keys" collapsible. To set your `transformers serve` endpoint, follow this order: 1. Unselect ALL models in the list above (e.g. `gpt4`, ...); 2. Add and select the model you want to use (e.g. `Qwen/Qwen3-4B`) 3. Add some random text to OpenAI API Key. This field won't be used, but it can't be empty; 4. Add the https address from `ngrok` to the "Override OpenAI Base URL" field, appending `/v1` to the address (i.e. `https://(...).ngrok-free.app/v1`); 5. Hit "Verify". 
After you follow these steps, your "Models" tab should look like the image below. Your server should also have received a few requests from the verification step. <h3 align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers_serve_cursor.png"/> </h3> You are now ready to use your local model in Cursor! For instance, if you toggle the AI Pane, you can select the model you added and ask it questions about your local files. <h3 align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers_serve_cursor_chat.png"/> </h3>
transformers/docs/source/en/cursor.md/0
{ "file_path": "transformers/docs/source/en/cursor.md", "repo_id": "transformers", "token_count": 806 }
384
<!--- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Installation Transformers works with [PyTorch](https://pytorch.org/get-started/locally/), [TensorFlow 2.0](https://www.tensorflow.org/install/pip), and [Flax](https://flax.readthedocs.io/en/latest/). It has been tested on Python 3.9+, PyTorch 2.1+, TensorFlow 2.6+, and Flax 0.4.1+. ## Virtual environment A virtual environment helps manage different projects and avoids compatibility issues between dependencies. Take a look at the [Install packages in a virtual environment using pip and venv](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/) guide if you're unfamiliar with Python virtual environments. <hfoptions id="virtual"> <hfoption id="venv"> Create and activate a virtual environment in your project directory with [venv](https://docs.python.org/3/library/venv.html). ```bash python -m venv .env source .env/bin/activate ``` </hfoption> <hfoption id="uv"> [uv](https://docs.astral.sh/uv/) is a fast Rust-based Python package and project manager. ```bash uv venv .env source .env/bin/activate ``` </hfoption> </hfoptions> ## Python You can install Transformers with pip or uv. 
<hfoptions id="install"> <hfoption id="pip"> [pip](https://pip.pypa.io/en/stable/) is a package installer for Python. Install Transformers with pip in your newly created virtual environment. ```bash pip install transformers ``` </hfoption> <hfoption id="uv"> [uv](https://docs.astral.sh/uv/) is a fast Rust-based Python package and project manager. ```bash uv pip install transformers ``` </hfoption> </hfoptions> For GPU acceleration, install the appropriate CUDA drivers for [PyTorch](https://pytorch.org/get-started/locally) and [TensorFlow](https://www.tensorflow.org/install/pip). Run the command below to check if your system detects an NVIDIA GPU. ```bash nvidia-smi ``` To install a CPU-only version of Transformers and a machine learning framework, run the following command. <hfoptions id="cpu-only"> <hfoption id="PyTorch"> ```bash pip install 'transformers[torch]' uv pip install 'transformers[torch]' ``` </hfoption> <hfoption id="TensorFlow"> For Apple M1 hardware, you need to install CMake and pkg-config first. ```bash brew install cmake brew install pkg-config ``` Install TensorFlow 2.0. ```bash pip install 'transformers[tf-cpu]' uv pip install 'transformers[tf-cpu]' ``` </hfoption> <hfoption id="Flax"> ```bash pip install 'transformers[flax]' uv pip install 'transformers[flax]' ``` </hfoption> </hfoptions> Test whether the install was successful with the following command. It should return a label and score for the provided text. ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('hugging face is the best'))" [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ### Source install Installing from source installs the *latest* version rather than the *stable* version of the library. It ensures you have the most up-to-date changes in Transformers and it's useful for experimenting with the latest features or fixing a bug that hasn't been officially released in the stable version yet. 
The downside is that the latest version may not always be stable. If you encounter any problems, please open a [GitHub Issue](https://github.com/huggingface/transformers/issues) so we can fix it as soon as possible. Install from source with the following command. ```bash pip install git+https://github.com/huggingface/transformers ``` Check if the install was successful with the command below. It should return a label and score for the provided text. ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('hugging face is the best'))" [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ### Editable install An [editable install](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs) is useful if you're developing locally with Transformers. It links your local copy of Transformers to the Transformers [repository](https://github.com/huggingface/transformers) instead of copying the files. The files are added to Python's import path. ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` > [!WARNING] > You must keep the local Transformers folder to keep using it. Update your local version of Transformers with the latest changes in the main repository with the following command. ```bash cd ~/transformers/ git pull ``` ## conda [conda](https://docs.conda.io/projects/conda/en/stable/#) is a language-agnostic package manager. Install Transformers from the [conda-forge](https://anaconda.org/conda-forge/transformers) channel in your newly created virtual environment. ```bash conda install conda-forge::transformers ``` ## Set up After installation, you can configure the Transformers cache location or set up the library for offline usage. ### Cache directory When you load a pretrained model with [`~PreTrainedModel.from_pretrained`], the model is downloaded from the Hub and locally cached. Every time you load a model, it checks whether the cached model is up-to-date. 
If it's the same, then the local model is loaded. If it's not the same, the newer model is downloaded and cached. The default directory given by the shell environment variable `TRANSFORMERS_CACHE` is `~/.cache/huggingface/hub`. On Windows, the default directory is `C:\Users\username\.cache\huggingface\hub`. Cache a model in a different directory by changing the path in the following shell environment variables (listed by priority). 1. [HF_HUB_CACHE](https://hf.co/docs/huggingface_hub/package_reference/environment_variables#hfhubcache) or `TRANSFORMERS_CACHE` (default) 2. [HF_HOME](https://hf.co/docs/huggingface_hub/package_reference/environment_variables#hfhome) 3. [XDG_CACHE_HOME](https://hf.co/docs/huggingface_hub/package_reference/environment_variables#xdgcachehome) + `/huggingface` (only if `HF_HOME` is not set) Older versions of Transformers uses the shell environment variables `PYTORCH_TRANSFORMERS_CACHE` or `PYTORCH_PRETRAINED_BERT_CACHE`. You should keep these unless you specify the newer shell environment variable `TRANSFORMERS_CACHE`. ### Offline mode To use Transformers in an offline or firewalled environment requires the downloaded and cached files ahead of time. Download a model repository from the Hub with the [`~huggingface_hub.snapshot_download`] method. > [!TIP] > Refer to the [Download files from the Hub](https://hf.co/docs/huggingface_hub/guides/download) guide for more options for downloading files from the Hub. You can download files from specific revisions, download from the CLI, and even filter which files to download from a repository. ```py from huggingface_hub import snapshot_download snapshot_download(repo_id="meta-llama/Llama-2-7b-hf", repo_type="model") ``` Set the environment variable `HF_HUB_OFFLINE=1` to prevent HTTP calls to the Hub when loading a model. ```bash HF_HUB_OFFLINE=1 \ python examples/pytorch/language-modeling/run_clm.py --model_name_or_path meta-llama/Llama-2-7b-hf --dataset_name wikitext ... 
``` Another option for only loading cached files is to set `local_files_only=True` in [`~PreTrainedModel.from_pretrained`]. ```py from transformers import LlamaForCausalLM model = LlamaForCausalLM.from_pretrained("./path/to/local/directory", local_files_only=True) ```
transformers/docs/source/en/installation.md/0
{ "file_path": "transformers/docs/source/en/installation.md", "repo_id": "transformers", "token_count": 2456 }
385
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Optimizing LLMs for Speed and Memory [[open-in-colab]] Large Language Models (LLMs) such as GPT3/4, [Falcon](https://huggingface.co/tiiuae/falcon-40b), and [Llama](https://huggingface.co/meta-llama/Llama-2-70b-hf) are rapidly advancing in their ability to tackle human-centric tasks, establishing themselves as essential tools in modern knowledge-based industries. Deploying these models in real-world tasks remains challenging, however: - To exhibit near-human text understanding and generation capabilities, LLMs currently require to be composed of billions of parameters (see [Kaplan et al](https://huggingface.co/papers/2001.08361), [Wei et. al](https://huggingface.co/papers/2206.07682)). This consequently amplifies the memory demands for inference. - In many real-world tasks, LLMs need to be given extensive contextual information. This necessitates the model's capability to manage very long input sequences during inference. The crux of these challenges lies in augmenting the computational and memory capabilities of LLMs, especially when handling expansive input sequences. In this guide, we will go over the effective techniques for efficient LLM deployment: 1. 
**Lower Precision:** Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization) can achieve computational advantages without a considerable decline in model performance. 2. **Flash Attention:** Flash Attention is a variation of the attention algorithm that not only provides a more memory-efficient approach but also realizes increased efficiency due to optimized GPU memory utilization. 3. **Architectural Innovations:** Considering that LLMs are always deployed in the same way during inference, namely autoregressive text generation with a long input context, specialized model architectures have been proposed that allow for more efficient inference. The most important advancement in model architectures hereby are [Alibi](https://huggingface.co/papers/2108.12409), [Rotary embeddings](https://huggingface.co/papers/2104.09864), [Multi-Query Attention (MQA)](https://huggingface.co/papers/1911.02150) and [Grouped-Query-Attention (GQA)](https://huggingface.co/papers/2305.13245). Throughout this guide, we will offer an analysis of auto-regressive generation from a tensor's perspective. We delve into the pros and cons of adopting lower precision, provide a comprehensive exploration of the latest attention algorithms, and discuss improved LLM architectures. While doing so, we run practical examples showcasing each of the feature improvements. ## 1. Lower Precision Memory requirements of LLMs can be best understood by seeing the LLM as a set of weight matrices and vectors and the text inputs as a sequence of vectors. In the following, the definition *weights* will be used to signify all model weight matrices and vectors. At the time of writing this guide, LLMs consist of at least a couple billion parameters. Each parameter thereby is made of a decimal number, e.g. 
`4.5689` which is usually stored in either [float32](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format), or [float16](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) format. This allows us to easily compute the memory requirement to load the LLM into memory: > *Loading the weights of a model having X billion parameters requires roughly 4 * X GB of VRAM in float32 precision* Nowadays, models are however rarely trained in full float32 precision, but usually in bfloat16 precision or less frequently in float16 precision. Therefore the rule of thumb becomes: > *Loading the weights of a model having X billion parameters requires roughly 2 * X GB of VRAM in bfloat16/float16 precision* For shorter text inputs (less than 1024 tokens), the memory requirement for inference is very much dominated by the memory requirement to load the weights. Therefore, for now, let's assume that the memory requirement for inference is equal to the memory requirement to load the model into the GPU VRAM. To give some examples of how much VRAM it roughly takes to load a model in bfloat16: - **GPT3** requires 2 \* 175 GB = **350 GB** VRAM - [**Bloom**](https://huggingface.co/bigscience/bloom) requires 2 \* 176 GB = **352 GB** VRAM - [**Llama-2-70b**](https://huggingface.co/meta-llama/Llama-2-70b-hf) requires 2 \* 70 GB = **140 GB** VRAM - [**Falcon-40b**](https://huggingface.co/tiiuae/falcon-40b) requires 2 \* 40 GB = **80 GB** VRAM - [**MPT-30b**](https://huggingface.co/mosaicml/mpt-30b) requires 2 \* 30 GB = **60 GB** VRAM - [**bigcode/starcoder**](https://huggingface.co/bigcode/starcoder) requires 2 \* 15.5 = **31 GB** VRAM As of writing this document, the largest GPU chip on the market is the A100 & H100 offering 80GB of VRAM. 
Most of the models listed before require more than 80GB just to be loaded and therefore necessarily require [tensor parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#tensor-parallelism) and/or [pipeline parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism). 🤗 Transformers now supports tensor parallelism for supported models having `base_tp_plan` in their respective config classes. Learn more about Tensor Parallelism [here](perf_train_gpu_many#tensor-parallelism). Furthermore, if you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at [the text-generation-inference library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling). Naive pipeline parallelism is supported out of the box. For this, simply load the model with `device_map="auto"` which will automatically place the different layers on the available GPUs as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference). Note, however that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this more advanced pipeline parallelism is required as explained [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism). If you have access to an 8 x 80GB A100 node, you could load BLOOM as follows ```bash !pip install transformers accelerate bitsandbytes optimum ``` ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("bigscience/bloom", device_map="auto", pad_token_id=0) ``` By using `device_map="auto"` the attention layers would be equally distributed over all available GPUs. 
In this guide, we will use [bigcode/octocoder](https://huggingface.co/bigcode/octocoder) as it can be run on a single 40 GB A100 GPU device chip. Note that all memory and speed optimizations that we will apply going forward, are equally applicable to models that require model or tensor parallelism. Since the model is loaded in bfloat16 precision, using our rule of thumb above, we would expect the memory requirement to run inference with `bigcode/octocoder` to be around 31 GB VRAM. Let's give it a try. We first load the model and tokenizer and then pass both to Transformers' [pipeline](https://huggingface.co/docs/transformers/main_classes/pipelines) object. ```python from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import torch model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", dtype=torch.bfloat16, device_map="auto", pad_token_id=0) tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder") pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) ``` ```python prompt = "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer:" result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):] result ``` **Output**: ``` Here is a Python function that transforms bytes to Giga bytes:\n\n```python\ndef bytes_to_giga_bytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single ``` Nice, we can now directly use the result to convert bytes into Gigabytes. ```python def bytes_to_giga_bytes(bytes): return bytes / 1024 / 1024 / 1024 ``` Let's call [`torch.cuda.max_memory_allocated`](https://pytorch.org/docs/stable/generated/torch.cuda.max_memory_allocated.html) to measure the peak GPU memory allocation. ```python bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) ``` **Output**: ```bash 29.0260648727417 ``` Close enough to our back-of-the-envelope computation! 
We can see the number is not exactly correct as going from bytes to kilobytes requires a multiplication of 1024 instead of 1000. Therefore the back-of-the-envelope formula can also be understood as an "at most X GB" computation. Note that if we had tried to run the model in full float32 precision, a whopping 64 GB of VRAM would have been required. > Almost all models are trained in bfloat16 nowadays, there is no reason to run the model in full float32 precision if [your GPU supports bfloat16](https://discuss.pytorch.org/t/bfloat16-native-support/117155/5). Float32 won't give better inference results than the precision that was used to train the model. If you are unsure in which format the model weights are stored on the Hub, you can always look into the checkpoint's config under `"dtype"`, *e.g.* [here](https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/config.json#L21). It is recommended to set the model to the same precision type as written in the config when loading with `from_pretrained(..., dtype=...)` except when the original type is float32 in which case one can use both `float16` or `bfloat16` for inference. Let's define a `flush(...)` function to free all allocated memory so that we can accurately measure the peak allocated GPU memory. ```python del pipe del model import gc import torch def flush(): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() ``` Let's call it now for the next experiment. ```python flush() ``` From the Accelerate library, you can also use a device-agnostic utility method called [release_memory](https://github.com/huggingface/accelerate/blob/29be4788629b772a3b722076e433b5b3b5c85da3/src/accelerate/utils/memory.py#L63), which takes various hardware backends like XPU, MLU, NPU, MPS, and more into account. ```python from accelerate.utils import release_memory # ... release_memory(model) ``` Now what if your GPU does not have 32 GB of VRAM? 
It has been found that model weights can be quantized to 8-bit or 4-bits without a significant loss in performance (see [Dettmers et al.](https://huggingface.co/papers/2208.07339)). Model can be quantized to even 3 or 2 bits with an acceptable loss in performance as shown in the recent [GPTQ paper](https://huggingface.co/papers/2210.17323) ๐Ÿคฏ. Without going into too many details, quantization schemes aim at reducing the precision of weights while trying to keep the model's inference results as accurate as possible (*a.k.a* as close as possible to bfloat16). Note that quantization works especially well for text generation since all we care about is choosing the *set of most likely next tokens* and don't really care about the exact values of the next token *logit* distribution. All that matters is that the next token *logit* distribution stays roughly the same so that an `argmax` or `topk` operation gives the same results. There are various quantization techniques, which we won't discuss in detail here, but in general, all quantization techniques work as follows: - 1. Quantize all weights to the target precision - 2. Load the quantized weights, and pass the input sequence of vectors in bfloat16 precision - 3. Dynamically dequantize weights to bfloat16 to perform the computation with their input vectors in bfloat16 precision In a nutshell, this means that *inputs-weight matrix* multiplications, with \\( X \\) being the *inputs*, \\( W \\) being a weight matrix and \\( Y \\) being the output: $$ Y = X * W $$ are changed to $$ Y = X * \text{dequantize}(W) $$ for every matrix multiplication. Dequantization and re-quantization is performed sequentially for all weight matrices as the inputs run through the network graph. Therefore, inference time is often **not** reduced when using quantized weights, but rather increases. Enough theory, let's give it a try! 
To quantize the weights with Transformers, you need to make sure that the [`bitsandbytes`](https://github.com/bitsandbytes-foundation/bitsandbytes) library is installed. ```bash !pip install bitsandbytes ``` We can then load models in 8-bit quantization by simply adding a `load_in_8bit=True` flag to `from_pretrained`. ```python model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_8bit=True, pad_token_id=0) ``` Now, let's run our example again and measure the memory usage. ```python pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):] result ``` **Output**: ``` Here is a Python function that transforms bytes to Giga bytes:\n\n```python\ndef bytes_to_giga_bytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single ``` Nice, we're getting the same result as before, so no loss in accuracy! Let's look at how much memory was used this time. ```python bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) ``` **Output**: ``` 15.219234466552734 ``` Significantly less! We're down to just a bit over 15 GBs and could therefore run this model on consumer GPUs like the 4090. We're seeing a very nice gain in memory efficiency and more or less no degradation to the model's output. However, we can also notice a slight slow-down during inference. We delete the models and flush the memory again. ```python del model del pipe ``` ```python flush() ``` Let's see what peak GPU memory consumption 4-bit quantization gives. Quantizing the model to 4-bit can be done with the same API as before - this time by passing `load_in_4bit=True` instead of `load_in_8bit=True`. 
```python model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_4bit=True, pad_token_id=0) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):] result ``` **Output**: ``` Here is a Python function that transforms bytes to Giga bytes:\n\n```\ndef bytes_to_gigabytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single argument ``` We're almost seeing the same output text as before - just the `python` is missing just before the code snippet. Let's see how much memory was required. ```python bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) ``` **Output**: ``` 9.543574333190918 ``` Just 9.5GB! That's really not a lot for a >15 billion parameter model. While we see very little degradation in accuracy for our model here, 4-bit quantization can in practice often lead to different results compared to 8-bit quantization or full `bfloat16` inference. It is up to the user to try it out. Also note that inference here was again a bit slower compared to 8-bit quantization which is due to the more aggressive quantization method used for 4-bit quantization leading to \\( \text{quantize} \\) and \\( \text{dequantize} \\) taking longer during inference. ```python del model del pipe ``` ```python flush() ``` Overall, we saw that running OctoCoder in 8-bit precision reduced the required GPU VRAM from 32G GPU VRAM to only 15GB and running the model in 4-bit precision further reduces the required GPU VRAM to just a bit over 9GB. 4-bit quantization allows the model to be run on GPUs such as RTX3090, V100, and T4 which are quite accessible for most people. For more information on quantization and to see how one can quantize models to require even less GPU VRAM memory than 4-bit, we recommend looking into the [`AutoGPTQ`](https://huggingface.co/docs/transformers/main/en/main_classes/quantization#autogptq-integration%60) implementation. 
> As a conclusion, it is important to remember that model quantization trades improved memory efficiency against accuracy and in some cases inference time. If GPU memory is not a constraint for your use case, there is often no need to look into quantization. However many GPUs simply can't run LLMs without quantization methods and in this case, 4-bit and 8-bit quantization schemes are extremely useful tools. For more in-detail usage information, we strongly recommend taking a look at the [Transformers Quantization Docs](https://huggingface.co/docs/transformers/main_classes/quantization#general-usage). Next, let's look into how we can improve computational and memory efficiency by using better algorithms and an improved model architecture. ## 2. Flash Attention Today's top-performing LLMs share more or less the same fundamental architecture that consists of feed-forward layers, activation layers, layer normalization layers, and most crucially, self-attention layers. Self-attention layers are central to Large Language Models (LLMs) in that they enable the model to understand the contextual relationships between input tokens. However, the peak GPU memory consumption for self-attention layers grows *quadratically* both in compute and memory complexity with number of input tokens (also called *sequence length*) that we denote in the following by \\( N \\) . While this is not really noticeable for shorter input sequences (of up to 1000 input tokens), it becomes a serious problem for longer input sequences (at around 16000 input tokens). Let's take a closer look. The formula to compute the output \\( \mathbf{O} \\) of a self-attention layer for an input \\( \mathbf{X} \\) of length \\( N \\) is: $$ \textbf{O} = \text{Attn}(\mathbf{X}) = \mathbf{V} \times \text{Softmax}(\mathbf{QK}^T) \text{ with } \mathbf{Q} = \mathbf{W}_q \mathbf{X}, \mathbf{V} = \mathbf{W}_v \mathbf{X}, \mathbf{K} = \mathbf{W}_k \mathbf{X} $$ \\( \mathbf{X} = (\mathbf{x}_1, ... 
\mathbf{x}_{N}) \\) is thereby the input sequence to the attention layer. The projections \\( \mathbf{Q} \\) and \\( \mathbf{K} \\) will each consist of \\( N \\) vectors resulting in the \\( \mathbf{QK}^T \\) being of size \\( N^2 \\) . LLMs usually have multiple attention heads, thus doing multiple self-attention computations in parallel. Assuming, the LLM has 40 attention heads and runs in bfloat16 precision, we can calculate the memory requirement to store the \\( \mathbf{QK^T} \\) matrices to be \\( 40 * 2 * N^2 \\) bytes. For \\( N=1000 \\) only around 50 MB of VRAM are needed, however, for \\( N=16000 \\) we would need 19 GB of VRAM, and for \\( N=100,000 \\) we would need almost 1TB just to store the \\( \mathbf{QK}^T \\) matrices. Long story short, the default self-attention algorithm quickly becomes prohibitively memory-expensive for large input contexts. As LLMs improve in text comprehension and generation, they are applied to increasingly complex tasks. While models once handled the translation or summarization of a few sentences, they now manage entire pages, demanding the capability to process extensive input lengths. How can we get rid of the exorbitant memory requirements for large input lengths? We need a new way to compute the self-attention mechanism that gets rid of the \\( QK^T \\) matrix. [Tri Dao et al.](https://huggingface.co/papers/2205.14135) developed exactly such a new algorithm and called it **Flash Attention**. 
In a nutshell, Flash Attention breaks the \\(\mathbf{V} \times \text{Softmax}(\mathbf{QK}^T\\)) computation apart and instead computes smaller chunks of the output by iterating over multiple softmax computation steps: $$ \textbf{O}_i \leftarrow s^a_{ij} * \textbf{O}_i + s^b_{ij} * \mathbf{V}_{j} \times \text{Softmax}(\mathbf{QK}^T_{i,j}) \text{ for multiple } i, j \text{ iterations} $$ with \\( s^a_{ij} \\) and \\( s^b_{ij} \\) being some softmax normalization statistics that need to be recomputed for every \\( i \\) and \\( j \\) . Please note that the whole Flash Attention is a bit more complex and is greatly simplified here as going in too much depth is out of scope for this guide. The reader is invited to take a look at the well-written [Flash Attention paper](https://huggingface.co/papers/2205.14135) for more details. The main takeaway here is: > By keeping track of softmax normalization statistics and by using some smart mathematics, Flash Attention gives **numerical identical** outputs compared to the default self-attention layer at a memory cost that only increases linearly with \\( N \\) . Looking at the formula, one would intuitively say that Flash Attention must be much slower compared to the default self-attention formula as more computation needs to be done. Indeed Flash Attention requires more FLOPs compared to normal attention as the softmax normalization statistics have to constantly be recomputed (see [paper](https://huggingface.co/papers/2205.14135) for more details if interested) > However, Flash Attention is much faster in inference compared to default attention which comes from its ability to significantly reduce the demands on the slower, high-bandwidth memory of the GPU (VRAM), focusing instead on the faster on-chip memory (SRAM). 
Essentially, Flash Attention makes sure that all intermediate write and read operations can be done using the fast *on-chip* SRAM memory instead of having to access the slower VRAM memory to compute the output vector \\( \mathbf{O} \\) . In practice, there is currently absolutely no reason to **not** use Flash Attention if available. The algorithm gives mathematically the same outputs, and is both faster and more memory-efficient. Let's look at a practical example. Our OctoCoder model now gets a significantly longer input prompt which includes a so-called *system prompt*. System prompts are used to steer the LLM into a better assistant that is tailored to the users' task. In the following, we use a system prompt that will make OctoCoder a better coding assistant. ```python system_prompt = """Below are a series of dialogues between various people and an AI technical assistant. The assistant tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble but knowledgeable. The assistant is happy to help with code questions and will do their best to understand exactly what is needed. It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer. That said, the assistant is practical really does its best, and doesn't let caution get too much in the way of being useful. The Starcoder models are a series of 15.5B parameter models trained on 80+ programming languages from The Stack (v1.2) (excluding opt-out requests). The model uses Multi Query Attention, was trained using the Fill-in-the-Middle objective, and with 8,192 tokens context window for a trillion tokens of heavily deduplicated data. ----- Question: Write a function that takes two lists and returns a list that has alternating elements from each input list. Answer: Sure. Here is a function that does that. 
def alternating(list1, list2): results = [] for i in range(len(list1)): results.append(list1[i]) results.append(list2[i]) return results Question: Can you write some test cases for this function? Answer: Sure, here are some tests. assert alternating([10, 20, 30], [1, 2, 3]) == [10, 1, 20, 2, 30, 3] assert alternating([True, False], [4, 5]) == [True, 4, False, 5] assert alternating([], []) == [] Question: Modify the function so that it returns all input elements when the lists have uneven length. The elements from the longer list should be at the end. Answer: Here is the modified function. def alternating(list1, list2): results = [] for i in range(min(len(list1), len(list2))): results.append(list1[i]) results.append(list2[i]) if len(list1) > len(list2): results.extend(list1[i+1:]) else: results.extend(list2[i+1:]) return results ----- """ ``` For demonstration purposes, we duplicate the system prompt by ten so that the input length is long enough to observe Flash Attention's memory savings. We append the original text prompt `"Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here"` ```python long_prompt = 10 * system_prompt + prompt ``` We instantiate our model again in bfloat16 precision. ```python model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", dtype=torch.bfloat16, device_map="auto") tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder") pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) ``` Let's now run the model just like before *without Flash Attention* and measure the peak GPU memory requirement and inference time. ```python import time start_time = time.time() result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):] print(f"Generated in {time.time() - start_time} seconds.") result ``` **Output**: ``` Generated in 10.96854019165039 seconds. Sure. 
Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef ``` We're getting the same output as before, however this time, the model repeats the answer multiple times until its 60-token cut-off. This is not surprising as we've repeated the system prompt ten times for demonstration purposes and thus cued the model to repeat itself. **Note** that the system prompt should not be repeated ten times in real-world applications - one time is enough! Let's measure the peak GPU memory requirement. ```python bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) ``` **Output**: ```bash 37.668193340301514 ``` As we can see the peak GPU memory requirement is now significantly higher than in the beginning, which is largely due to the longer input sequence. Also the generation takes a little over ten seconds now. We call `flush()` to free GPU memory for our next experiment. ```python flush() ``` For comparison, let's run the same function, but enable Flash Attention instead. To do so, we convert the model to [BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview) and by doing so enabling PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) which in turn is able to use Flash Attention. ```python model.to_bettertransformer() ``` Now we run the exact same code snippet as before and under the hood Transformers will make use of Flash Attention. ```py start_time = time.time() with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):] print(f"Generated in {time.time() - start_time} seconds.") result ``` **Output**: ``` Generated in 3.0211617946624756 seconds. Sure. 
Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef ``` We're getting the exact same result as before, but can observe a very significant speed-up thanks to Flash Attention. Let's measure the memory consumption one last time. ```python bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) ``` **Output**: ``` 32.617331981658936 ``` And we're almost back to our original 29GB peak GPU memory from the beginning. We can observe that we only use roughly 100MB more GPU memory when passing a very long input sequence with Flash Attention compared to passing a short input sequence as done in the beginning. ```py flush() ``` For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one#flashattention-2). ## 3. Architectural Innovations So far we have looked into improving computational and memory efficiency by: - Casting the weights to a lower precision format - Replacing the self-attention algorithm with a more memory- and compute efficient version Let's now look into how we can change the architecture of an LLM so that it is most effective and efficient for task that require long text inputs, *e.g.*: - Retrieval augmented Questions Answering, - Summarization, - Chat Note that *chat* not only requires the LLM to handle long text inputs, but it also necessitates that the LLM is able to efficiently handle the back-and-forth dialogue between user and assistant (such as ChatGPT). Once trained, the fundamental LLM architecture is difficult to change, so it is important to make considerations about the LLM's tasks beforehand and accordingly optimize the model's architecture. There are two important components of the model architecture that quickly become memory and/or performance bottlenecks for large input sequences. 
- The positional embeddings - The key-value cache Let's go over each component in more detail ### 3.1 Improving positional embeddings of LLMs Self-attention puts each token in relation to each other's tokens. As an example, the \\( \text{Softmax}(\mathbf{QK}^T) \\) matrix of the text input sequence *"Hello", "I", "love", "you"* could look as follows: ![](/blog/assets/163_optimize_llm/self_attn_tokens.png) Each word token is given a probability mass at which it attends all other word tokens and, therefore is put into relation with all other word tokens. E.g. the word *"love"* attends to the word *"Hello"* with 5%, to *"I"* with 30%, and to itself with 65%. A LLM based on self-attention, but without position embeddings would have great difficulties in understanding the positions of the text inputs to each other. This is because the probability score computed by \\( \mathbf{QK}^T \\) relates each word token to each other word token in \\( O(1) \\) computations regardless of their relative positional distance to each other. Therefore, for the LLM without position embeddings each token appears to have the same distance to all other tokens, *e.g.* differentiating between *"Hello I love you"* and *"You love I hello"* would be very challenging. For the LLM to understand sentence order, an additional *cue* is needed and is usually applied in the form of *positional encodings* (or also called *positional embeddings*). Positional encodings, encode the position of each token into a numerical presentation that the LLM can leverage to better understand sentence order. The authors of the [*Attention Is All You Need*](https://huggingface.co/papers/1706.03762) paper introduced sinusoidal positional embeddings \\( \mathbf{P} = \mathbf{p}_1, \ldots, \mathbf{p}_N \\) . where each vector \\( \mathbf{p}_i \\) is computed as a sinusoidal function of its position \\( i \\) . 
The positional encodings are then simply added to the input sequence vectors \\( \mathbf{\hat{X}} = \mathbf{\hat{x}}_1, \ldots, \mathbf{\hat{x}}_N \\) = \\( \mathbf{x}_1 + \mathbf{p}_1, \ldots, \mathbf{x}_N + \mathbf{p}_N \\) thereby cueing the model to better learn sentence order. Instead of using fixed position embeddings, others (such as [Devlin et al.](https://huggingface.co/papers/1810.04805)) used learned positional encodings for which the positional embeddings \\( \mathbf{P} \\) are learned during training. Sinusoidal and learned position embeddings used to be the predominant methods to encode sentence order into LLMs, but a couple of problems related to these positional encodings were found: 1. Sinusoidal and learned position embeddings are both absolute positional embeddings, *i.e.* encoding a unique embedding for each position id: \\( 0, \ldots, N \\) . As shown by [Huang et al.](https://huggingface.co/papers/2009.13658) and [Su et al.](https://huggingface.co/papers/2104.09864), absolute positional embeddings lead to poor LLM performance for long text inputs. For long text inputs, it is advantageous if the model learns the relative positional distance input tokens have to each other instead of their absolute position. 2. When using learned position embeddings, the LLM has to be trained on a fixed input length \\( N \\), which makes it difficult to extrapolate to an input length longer than what it was trained on. Recently, relative positional embeddings that can tackle the above mentioned problems have become more popular, most notably: - [Rotary Position Embedding (RoPE)](https://huggingface.co/papers/2104.09864) - [ALiBi](https://huggingface.co/papers/2108.12409) Both *RoPE* and *ALiBi* argue that it's best to cue the LLM about sentence order directly in the self-attention algorithm as it's there that word tokens are put into relation with each other. More specifically, sentence order should be cued by modifying the \\( \mathbf{QK}^T \\) computation. 
Without going into too many details, *RoPE* notes that positional information can be encoded into query-key pairs, *e.g.* \\( \mathbf{q}_i \\) and \\( \mathbf{x}_j \\) by rotating each vector by an angle \\( \theta * i \\) and \\( \theta * j \\) respectively with \\( i, j \\) describing each vectors sentence position: $$ \mathbf{\hat{q}}_i^T \mathbf{\hat{x}}_j = \mathbf{{q}}_i^T \mathbf{R}_{\theta, i -j} \mathbf{{x}}_j. $$ \\( \mathbf{R}_{\theta, i - j} \\) thereby represents a rotational matrix. \\( \theta \\) is *not* learned during training, but instead set to a pre-defined value that depends on the maximum input sequence length during training. > By doing so, the probability score between \\( \mathbf{q}_i \\) and \\( \mathbf{q}_j \\) is only affected if \\( i \ne j \\) and solely depends on the relative distance \\( i - j \\) regardless of each vector's specific positions \\( i \\) and \\( j \\) . *RoPE* is used in multiple of today's most important LLMs, such as: - [**Falcon**](https://huggingface.co/tiiuae/falcon-40b) - [**Llama**](https://huggingface.co/papers/2302.13971) - [**PaLM**](https://huggingface.co/papers/2204.02311) As an alternative, *ALiBi* proposes a much simpler relative position encoding scheme. The relative distance that input tokens have to each other is added as a negative integer scaled by a pre-defined value `m` to each query-key entry of the \\( \mathbf{QK}^T \\) matrix right before the softmax computation. ![](/blog/assets/163_optimize_llm/alibi.png) As shown in the [ALiBi](https://huggingface.co/papers/2108.12409) paper, this simple relative positional encoding allows the model to retain a high performance even at very long text input sequences. 
*ALiBi* is used in multiple of today's most important LLMs, such as: - [**MPT**](https://huggingface.co/mosaicml/mpt-30b) - [**BLOOM**](https://huggingface.co/bigscience/bloom) Both *RoPE* and *ALiBi* position encodings can extrapolate to input lengths not seen during training whereas it has been shown that extrapolation works much better out-of-the-box for *ALiBi* as compared to *RoPE*. For ALiBi, one simply increases the values of the lower triangular position matrix to match the length of the input sequence. For *RoPE*, keeping the same \\( \theta \\) that was used during training leads to poor results when passing text inputs much longer than those seen during training, *c.f* [Press et al.](https://huggingface.co/papers/2108.12409). However, the community has found a couple of effective tricks that adapt \\( \theta \\), thereby allowing *RoPE* position embeddings to work well for extrapolated text input sequences (see [here](https://github.com/huggingface/transformers/pull/24653)). > Both RoPE and ALiBi are relative positional embeddings that are *not* learned during training, but instead are based on the following intuitions: - Positional cues about the text inputs should be given directly to the \\( QK^T \\) matrix of the self-attention layer - The LLM should be incentivized to learn a constant *relative* distance positional encodings have to each other - The further text input tokens are from each other, the lower the probability of their query-value probability. Both RoPE and ALiBi lower the query-key probability of tokens far away from each other. RoPE by decreasing their vector product by increasing the angle between the query-key vectors. ALiBi by adding large negative numbers to the vector product In conclusion, LLMs that are intended to be deployed in tasks that require handling large text inputs are better trained with relative positional embeddings, such as RoPE and ALiBi. 
Also note that even if an LLM with RoPE and ALiBi has been trained only on a fixed length of say \\( N_1 = 2048 \\) it can still be used in practice with text inputs much larger than \\( N_1 \\), like \\( N_2 = 8192 > N_1 \\) by extrapolating the positional embeddings. ### 3.2 The key-value cache Auto-regressive text generation with LLMs works by iteratively putting in an input sequence, sampling the next token, appending the next token to the input sequence, and continuing to do so until the LLM produces a token that signifies that the generation has finished. Please have a look at [Transformer's Generate Text Tutorial](https://huggingface.co/docs/transformers/llm_tutorial#generate-text) to get a more visual explanation of how auto-regressive generation works. Let's run a quick code snippet to show how auto-regressive works in practice. We will simply take the most likely next token via `torch.argmax`. ```python input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda") for _ in range(5): next_logits = model(input_ids)["logits"][:, -1:] next_token_id = torch.argmax(next_logits,dim=-1) input_ids = torch.cat([input_ids, next_token_id], dim=-1) print("shape of input_ids", input_ids.shape) generated_text = tokenizer.batch_decode(input_ids[:, -5:]) generated_text ``` **Output**: ``` shape of input_ids torch.Size([1, 21]) shape of input_ids torch.Size([1, 22]) shape of input_ids torch.Size([1, 23]) shape of input_ids torch.Size([1, 24]) shape of input_ids torch.Size([1, 25]) [' Here is a Python function'] ``` As we can see every time we increase the text input tokens by the just sampled token. With very few exceptions, LLMs are trained using the [causal language modeling objective](https://huggingface.co/docs/transformers/tasks/language_modeling#causal-language-modeling) and therefore mask the upper triangle matrix of the attention score - this is why in the two diagrams above the attention scores are left blank (*a.k.a* have 0 probability). 
For a quick recap on causal language modeling you can refer to the [*Illustrated Self Attention blog*](https://jalammar.github.io/illustrated-gpt2/#part-2-illustrated-self-attention). As a consequence, tokens *never* depend on future tokens, more specifically the \\( \mathbf{q}_i \\) vector is never put in relation with any key, values vectors \\( \mathbf{k}_j, \mathbf{v}_j \\) if \\( j > i \\) . Instead \\( \mathbf{q}_i \\) only attends to previous key-value vectors \\( \mathbf{k}_{m < i}, \mathbf{v}_{m < i} \text{ , for } m \in \{0, \ldots i - 1\} \\). In order to reduce unnecessary computation, one can therefore cache each layer's key-value vectors for all previous timesteps. In the following, we will tell the LLM to make use of the key-value cache by retrieving and forwarding it for each forward pass. In Transformers, we can retrieve the key-value cache by passing the `use_cache` flag to the `forward` call and can then pass it with the current token. ```python past_key_values = None # past_key_values is the key-value cache generated_tokens = [] next_token_id = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda") for _ in range(5): next_logits, past_key_values = model(next_token_id, past_key_values=past_key_values, use_cache=True).to_tuple() next_logits = next_logits[:, -1:] next_token_id = torch.argmax(next_logits, dim=-1) print("shape of input_ids", next_token_id.shape) print("length of key-value cache", len(past_key_values[0][0])) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim] generated_tokens.append(next_token_id.item()) generated_text = tokenizer.batch_decode(generated_tokens) generated_text ``` **Output**: ``` shape of input_ids torch.Size([1, 1]) length of key-value cache 20 shape of input_ids torch.Size([1, 1]) length of key-value cache 21 shape of input_ids torch.Size([1, 1]) length of key-value cache 22 shape of input_ids torch.Size([1, 1]) length of key-value cache 23 shape of input_ids 
torch.Size([1, 1]) length of key-value cache 24 [' Here', ' is', ' a', ' Python', ' function'] ``` As one can see, when using the key-value cache the text input tokens are *not* increased in length, but remain a single input vector. The length of the key-value cache on the other hand is increased by one at every decoding step. > Making use of the key-value cache means that the \\( \mathbf{QK}^T \\) is essentially reduced to \\( \mathbf{q}_c\mathbf{K}^T \\) with \\( \mathbf{q}_c \\) being the query projection of the currently passed input token which is *always* just a single vector. Using the key-value cache has two advantages: - Significant increase in computational efficiency as less computations are performed compared to computing the full \\( \mathbf{QK}^T \\) matrix. This leads to an increase in inference speed - The maximum required memory is not increased quadratically with the number of generated tokens, but only increases linearly. > One should *always* make use of the key-value cache as it leads to identical results and a significant speed-up for longer input sequences. Transformers has the key-value cache enabled by default when making use of the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation). We have an entire guide dedicated to caches [here](./kv_cache). <Tip warning={true}> Note that, despite our advice to use key-value caches, your LLM output may be slightly different when you use them. This is a property of the matrix multiplication kernels themselves -- you can read more about it [here](https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535). </Tip> #### 3.2.1 Multi-round conversation The key-value cache is especially useful for applications such as chat where multiple passes of auto-regressive decoding are required. Let's look at an example. ``` User: How many people live in France? 
Assistant: Roughly 75 million people live in France User: And how many are in Germany? Assistant: Germany has ca. 81 million inhabitants ``` In this chat, the LLM runs auto-regressive decoding twice: 1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step. 2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`. Two things should be noted here: 1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`. 2. The key-value cache is extremely useful for chat as it allows us to continuously grow the encoded chat history instead of having to re-encode the chat history again from scratch (as e.g. would be the case when using an encoder-decoder architecture). In `transformers`, a `generate` call will return `past_key_values` when `return_dict_in_generate=True` is passed, in addition to the default `use_cache=True`. 
Note that it is not yet available through the `pipeline` interface. ```python # Generation as usual prompt = system_prompt + "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here" model_inputs = tokenizer(prompt, return_tensors='pt') generation_output = model.generate(**model_inputs, max_new_tokens=60, return_dict_in_generate=True) decoded_output = tokenizer.batch_decode(generation_output.sequences)[0] # Piping the returned `past_key_values` to speed up the next conversation round prompt = decoded_output + "\nQuestion: How can I modify the function above to return Mega bytes instead?\n\nAnswer: Here" model_inputs = tokenizer(prompt, return_tensors='pt') generation_output = model.generate( **model_inputs, past_key_values=generation_output.past_key_values, max_new_tokens=60, return_dict_in_generate=True ) tokenizer.batch_decode(generation_output.sequences)[0][len(prompt):] ``` **Output**: ``` is a modified version of the function that returns Mega bytes instead. def bytes_to_megabytes(bytes): return bytes / 1024 / 1024 Answer: The function takes a number of bytes as input and returns the number of ``` Great, no additional time is spent recomputing the same key and values for the attention layer! There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads. Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before. 
The number of float values amounts to two times the sequence length times the number of attention heads times the attention head dimension and times the number of layers. Computing this for our LLM at a hypothetical input sequence length of 16000 gives: ```python config = model.config 2 * 16_000 * config.n_layer * config.n_head * config.n_embd // config.n_head ``` **Output**: ``` 7864320000 ``` Roughly 8 billion float values! Storing 8 billion float values in `float16` precision requires around 15 GB of RAM which is circa half as much as the model weights themselves! Researchers have proposed two methods that allow to significantly reduce the memory cost of storing the key-value cache, which are explored in the next subsections. #### 3.2.2 Multi-Query-Attention (MQA) [Multi-Query-Attention](https://huggingface.co/papers/1911.02150) was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projection weights, one can use a single key-value projection weight pair that is shared across all attention heads without the model's performance significantly degrading. > By using a single key-value projection weight pair, the key value vectors \\( \mathbf{k}_i, \mathbf{v}_i \\) have to be identical across all attention heads which in turn means that we only need to store 1 key-value projection pair in the cache instead of `n_head` ones. As most LLMs use between 20 and 100 attention heads, MQA significantly reduces the memory consumption of the key-value cache. For the LLM used in this notebook we could therefore reduce the required memory consumption from 15 GB to less than 400 MB at an input sequence length of 16000. In addition to memory savings, MQA also leads to improved computational efficiency as explained in the following. 
In auto-regressive decoding, large key-value vectors need to be reloaded, concatenated with the current key-value vector pair to be then fed into the \\( \mathbf{q}_c\mathbf{K}^T \\) computation at every step. For auto-regressive decoding, the required memory bandwidth for the constant reloading can become a serious time bottleneck. By reducing the size of the key-value vectors less memory needs to be accessed, thus reducing the memory bandwidth bottleneck. For more detail, please have a look at [Noam's paper](https://huggingface.co/papers/1911.02150). The important part to understand here is that reducing the number of key-value attention heads to 1 only makes sense if a key-value cache is used. The peak memory consumption of the model for a single forward pass without key-value cache stays unchanged as every attention head still has a unique query vector so that each attention head still has a different \\( \mathbf{QK}^T \\) matrix. MQA has seen wide adoption by the community and is now used by many of the most popular LLMs: - [**Falcon**](https://huggingface.co/tiiuae/falcon-40b) - [**PaLM**](https://huggingface.co/papers/2204.02311) - [**MPT**](https://huggingface.co/mosaicml/mpt-30b) - [**BLOOM**](https://huggingface.co/bigscience/bloom) Also, the checkpoint used in this notebook - `bigcode/octocoder` - makes use of MQA. #### 3.2.3 Grouped-Query-Attention (GQA) [Grouped-Query-Attention](https://huggingface.co/papers/2305.13245), as proposed by Ainslie et al. from Google, found that using MQA can often lead to quality degradation compared to using vanilla multi-key-value head projections. The paper argues that more model performance can be kept by less drastically reducing the number of key-value head projection weights. Instead of using just a single key-value projection weight, `n < n_head` key-value projection weights should be used. 
By choosing `n` to a significantly smaller value than `n_head`, such as 2,4 or 8 almost all of the memory and speed gains from MQA can be kept while sacrificing less model capacity and thus arguably less performance. Moreover, the authors of GQA found out that existing model checkpoints can be *uptrained* to have a GQA architecture with as little as 5% of the original pre-training compute. While 5% of the original pre-training compute can still be a massive amount, GQA *uptraining* allows existing checkpoints to be useful for longer input sequences. GQA was only recently proposed which is why there is less adoption at the time of writing this notebook. The most notable application of GQA is [Llama-v2](https://huggingface.co/meta-llama/Llama-2-70b-hf). > As a conclusion, it is strongly recommended to make use of either GQA or MQA if the LLM is deployed with auto-regressive decoding and is required to handle large input sequences as is the case for example for chat. ## Conclusion The research community is constantly coming up with new, nifty ways to speed up inference time for ever-larger LLMs. As an example, one such promising research direction is [speculative decoding](https://huggingface.co/papers/2211.17192) where "easy tokens" are generated by smaller, faster language models and only "hard tokens" are generated by the LLM itself. Going into more detail is out of the scope of this notebook, but can be read upon in this [nice blog post](https://huggingface.co/blog/assisted-generation). The reason massive LLMs such as GPT3/4, Llama-2-70b, Claude, PaLM can run so quickly in chat-interfaces such as [Hugging Face Chat](https://huggingface.co/chat/) or ChatGPT is to a big part thanks to the above-mentioned improvements in precision, algorithms, and architecture. Going forward, accelerators such as GPUs, TPUs, etc... 
will only get faster and allow for more memory, but one should nevertheless always make sure to use the best available algorithms and architectures to get the most bang for your buck 🤗
transformers/docs/source/en/llm_tutorial_optimization.md/0
{ "file_path": "transformers/docs/source/en/llm_tutorial_optimization.md", "repo_id": "transformers", "token_count": 14836 }
386
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines The pipelines are a great and easy way to use models for inference. These pipelines are objects that abstract most of the complex code from the library, offering a simple API dedicated to several tasks, including Named Entity Recognition, Masked Language Modeling, Sentiment Analysis, Feature Extraction and Question Answering. See the [task summary](../task_summary) for examples of use. There are two categories of pipeline abstractions to be aware of: - The [`pipeline`] which is the most powerful object encapsulating all other pipelines. - Task-specific pipelines are available for [audio](#audio), [computer vision](#computer-vision), [natural language processing](#natural-language-processing), and [multimodal](#multimodal) tasks. ## The pipeline abstraction The *pipeline* abstraction is a wrapper around all the other available pipelines. It is instantiated as any other pipeline but can provide additional quality of life. 
Simple call on one item: ```python >>> pipe = pipeline("text-classification") >>> pipe("This restaurant is awesome") [{'label': 'POSITIVE', 'score': 0.9998743534088135}] ``` If you want to use a specific model from the [hub](https://huggingface.co) you can ignore the task if the model on the hub already defines it: ```python >>> pipe = pipeline(model="FacebookAI/roberta-large-mnli") >>> pipe("This restaurant is awesome") [{'label': 'NEUTRAL', 'score': 0.7313136458396912}] ``` To call a pipeline on many items, you can call it with a *list*. ```python >>> pipe = pipeline("text-classification") >>> pipe(["This restaurant is awesome", "This restaurant is awful"]) [{'label': 'POSITIVE', 'score': 0.9998743534088135}, {'label': 'NEGATIVE', 'score': 0.9996669292449951}] ``` To iterate over full datasets it is recommended to use a `dataset` directly. This means you don't need to allocate the whole dataset at once, nor do you need to do batching yourself. This should work just as fast as custom loops on GPU. If it doesn't don't hesitate to create an issue. ```python import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item # as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... 
``` For ease of use, a generator is also possible: ```python from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # This could come from a dataset, a database, a queue or HTTP request # in a server # Caveat: because this is iterative, you cannot use `num_workers > 1` variable # to use multiple threads to preprocess data. You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` [[autodoc]] pipeline ## Pipeline batching All pipelines can use batching. This will work whenever the pipeline uses its streaming ability (so when passing lists or `Dataset` or `generator`). ```python from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model ``` <Tip warning={true}> However, this is not automatically a win for performance. It can be either a 10x speedup or 5x slowdown depending on hardware, data and the actual model being used. 
Example where it's mostly a speedup: </Tip> ```python from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass ``` ``` # On GTX 970 ------------------------------ Streaming no batching 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU) ``` Example where it's most a slowdown: ```python class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n ``` This is 
a occasional very long sentence compared to the other. In that case, the **whole** batch will need to be 400 tokens long, so the whole batch will be [64, 400] instead of [64, 4], leading to the high slowdown. Even worse, on bigger batches, the program simply crashes. ``` ------------------------------ Streaming no batching 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch) ``` There are no good (general) solutions for this problem, and your mileage may vary depending on your use cases. Rule of thumb: For users, a rule of thumb is: - **Measure performance on your load, with your hardware. Measure, measure, and keep measuring. 
Real numbers are the only way to go.** - If you are latency constrained (live product doing inference), don't batch. - If you are using CPU, don't batch. - If you are using throughput (you want to run your model on a bunch of static data), on GPU, then: - If you have no clue about the size of the sequence_length ("natural" data), by default don't batch, measure and try tentatively to add it, add OOM checks to recover when it will fail (and it will at some point if you don't control the sequence_length.) - If your sequence_length is super regular, then batching is more likely to be VERY interesting, measure and push it until you get OOMs. - The larger the GPU the more likely batching is going to be more interesting - As soon as you enable batching, make sure you can handle OOMs nicely. ## Pipeline chunk batching `zero-shot-classification` and `question-answering` are slightly specific in the sense that a single input might yield multiple forward passes of a model. Under normal circumstances, this would yield issues with the `batch_size` argument. In order to circumvent this issue, both of these pipelines are a bit specific, they are `ChunkPipeline` instead of regular `Pipeline`. In short: ```python preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs) ``` Now becomes: ```python all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs) ``` This should be very transparent to your code because the pipelines are used in the same way. This is a simplified view, since the pipeline can automatically handle the batching! Meaning you don't have to care about how many forward passes your inputs are actually going to trigger, you can optimize the `batch_size` independently of the inputs. The caveats from the previous section still apply. 
## Pipeline FP16 inference Models can be run in FP16 which can be significantly faster on GPU while saving memory. Most models will not suffer noticeable performance loss from this. The larger the model, the less likely that it will. To enable FP16 inference, you can simply pass `dtype=torch.float16` or `dtype='float16'` to the pipeline constructor. Note that this only works for models with a PyTorch backend. Your inputs will be converted to FP16 internally. ## Pipeline custom code If you want to override a specific pipeline, don't hesitate to create an issue for your task at hand, the goal of the pipeline is to be easy to use and support most cases, so `transformers` could maybe support your use case. If you simply want to try it out, you can: - Subclass your pipeline of choice ```python class MyPipeline(TextClassificationPipeline): def postprocess(): # Your code goes here scores = scores * 100 # And here my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) ``` That should enable you to do all the custom code you want. ## Implementing a pipeline [Implementing a new pipeline](../add_new_pipeline) ## Audio Pipelines available for audio tasks include the following. ### AudioClassificationPipeline [[autodoc]] AudioClassificationPipeline - __call__ - all ### AutomaticSpeechRecognitionPipeline [[autodoc]] AutomaticSpeechRecognitionPipeline - __call__ - all ### TextToAudioPipeline [[autodoc]] TextToAudioPipeline - __call__ - all ### ZeroShotAudioClassificationPipeline [[autodoc]] ZeroShotAudioClassificationPipeline - __call__ - all ## Computer vision Pipelines available for computer vision tasks include the following. 
### DepthEstimationPipeline [[autodoc]] DepthEstimationPipeline - __call__ - all ### ImageClassificationPipeline [[autodoc]] ImageClassificationPipeline - __call__ - all ### ImageSegmentationPipeline [[autodoc]] ImageSegmentationPipeline - __call__ - all ### ImageToImagePipeline [[autodoc]] ImageToImagePipeline - __call__ - all ### ObjectDetectionPipeline [[autodoc]] ObjectDetectionPipeline - __call__ - all ### VideoClassificationPipeline [[autodoc]] VideoClassificationPipeline - __call__ - all ### ZeroShotImageClassificationPipeline [[autodoc]] ZeroShotImageClassificationPipeline - __call__ - all ### ZeroShotObjectDetectionPipeline [[autodoc]] ZeroShotObjectDetectionPipeline - __call__ - all ## Natural Language Processing Pipelines available for natural language processing tasks include the following. ### FillMaskPipeline [[autodoc]] FillMaskPipeline - __call__ - all ### QuestionAnsweringPipeline [[autodoc]] QuestionAnsweringPipeline - __call__ - all ### SummarizationPipeline [[autodoc]] SummarizationPipeline - __call__ - all ### TableQuestionAnsweringPipeline [[autodoc]] TableQuestionAnsweringPipeline - __call__ ### TextClassificationPipeline [[autodoc]] TextClassificationPipeline - __call__ - all ### TextGenerationPipeline [[autodoc]] TextGenerationPipeline - __call__ - all ### Text2TextGenerationPipeline [[autodoc]] Text2TextGenerationPipeline - __call__ - all ### TokenClassificationPipeline [[autodoc]] TokenClassificationPipeline - __call__ - all ### TranslationPipeline [[autodoc]] TranslationPipeline - __call__ - all ### ZeroShotClassificationPipeline [[autodoc]] ZeroShotClassificationPipeline - __call__ - all ## Multimodal Pipelines available for multimodal tasks include the following. 
### DocumentQuestionAnsweringPipeline [[autodoc]] DocumentQuestionAnsweringPipeline - __call__ - all ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline - __call__ - all ### ImageFeatureExtractionPipeline [[autodoc]] ImageFeatureExtractionPipeline - __call__ - all ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline - __call__ - all ### ImageTextToTextPipeline [[autodoc]] ImageTextToTextPipeline - __call__ - all ### MaskGenerationPipeline [[autodoc]] MaskGenerationPipeline - __call__ - all ### VisualQuestionAnsweringPipeline [[autodoc]] VisualQuestionAnsweringPipeline - __call__ - all ## Parent class: `Pipeline` [[autodoc]] Pipeline
transformers/docs/source/en/main_classes/pipelines.md/0
{ "file_path": "transformers/docs/source/en/main_classes/pipelines.md", "repo_id": "transformers", "token_count": 4677 }
387
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2025-05-13 and added to Hugging Face Transformers on 2025-03-04.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # Aya Vision [Aya Vision](https://huggingface.co/papers/2505.08751) is a family of open-weight multimodal vision-language models from Cohere Labs. It is trained with a synthetic annotation framework that generates high-quality multilingual image captions, improving Aya Vision's generated responses. In addition, a cross-modal model merging technique is used to prevent the model from losing its text capabilities after adding vision capabilities. The model combines a CommandR-7B language model with a SigLIP vision encoder. You can find all the original Aya Vision checkpoints under the [Aya Vision](https://huggingface.co/collections/CohereLabs/cohere-labs-aya-vision-67c4ccd395ca064308ee1484) collection. > [!TIP] > This model was contributed by [saurabhdash](https://huggingface.co/saurabhdash) and [yonigozlan](https://huggingface.co/yonigozlan). 
> > Click on the Aya Vision models in the right sidebar for more examples of how to apply Aya Vision to different image-to-text tasks. The example below demonstrates how to generate text based on an image with [`Pipeline`] or the [`AutoModel`] class. <hfoptions id="usage"> <hfoption id="Pipeline"> ```python from transformers import pipeline pipe = pipeline(model="CohereLabs/aya-vision-8b", task="image-text-to-text", device_map="auto") # Format message with the aya-vision chat template messages = [ {"role": "user", "content": [ {"type": "image", "url": "https://media.istockphoto.com/id/458012057/photo/istanbul-turkey.jpg?s=612x612&w=0&k=20&c=qogAOVvkpfUyqLUMr_XJQyq-HkACXyYUSZbKhBlPrxo="}, {"type": "text", "text": "Bu resimde hangi anฤฑt gรถsterilmektedir?"}, ]}, ] outputs = pipe(text=messages, max_new_tokens=300, return_full_text=False) print(outputs) ``` </hfoption> <hfoption id="AutoModel"> ```python # pip install 'git+https://github.com/huggingface/transformers.git@v4.49.0-Aya Vision' import torch from transformers import AutoProcessor, AutoModelForImageTextToText model_id = "CohereLabs/aya-vision-8b" processor = AutoProcessor.from_pretrained(model_id) model = AutoModelForImageTextToText.from_pretrained( model_id, device_map="auto", dtype=torch.float16 ) # Format message with the aya-vision chat template messages = [ {"role": "user", "content": [ {"type": "image", "url": "https://pbs.twimg.com/media/Fx7YvfQWYAIp6rZ?format=jpg&name=medium"}, {"type": "text", "text": "เคšเคฟเคคเฅเคฐ เคฎเฅ‡เค‚ เคฒเคฟเค–เคพ เคชเคพเค  เค•เฅเคฏเคพ เค•เคนเคคเคพ เคนเฅˆ?"}, ]}, ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device) gen_tokens = model.generate( **inputs, max_new_tokens=300, do_sample=True, temperature=0.3, ) print(processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)) ``` </hfoption> </hfoptions> Quantization reduces the 
memory footprint of large models by representing weights at lower precision. Refer to the [Quantization](../quantization/overview) overview for supported backends. The example below uses [bitsandbytes](../quantization/bitsandbytes) to only quantize the weights to 4-bits. ```python import torch from transformers import ( AutoProcessor, AutoModelForImageTextToText, BitsAndBytesConfig ) bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True ) processor = AutoProcessor.from_pretrained("CohereLabs/aya-vision-32b", use_fast=True) model = AutoModelForImageTextToText.from_pretrained( "CohereLabs/aya-vision-32b", quantization_config=bnb_config, device_map="auto" ) inputs = processor.apply_chat_template( [ {"role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/roschmid/dog-races/resolve/main/images/Border_Collie.jpg"}, {"type": "text", "text":"Describe what you see."} ]} ], padding=True, add_generation_prompt=True, tokenize=True, return_tensors="pt" ).to(model.device) generated = model.generate(**inputs, max_new_tokens=50) print(processor.tokenizer.decode(generated[0], skip_special_tokens=True)) ``` ## Notes - Images are represented with the `<image>` tag in the chat template. - Use the [`~ProcessorMixin.apply_chat_template`] method to correctly format inputs. - The example below demonstrates inference with multiple images. 
```py import torch from transformers import AutoProcessor, AutoModelForImageTextToText processor = AutoProcessor.from_pretrained("CohereForAI/aya-vision-8b") model = AutoModelForImageTextToText.from_pretrained( "CohereForAI/aya-vision-8b", device_map="auto", dtype=torch.float16 ) messages = [ { "role": "user", "content": [ { "type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, { "type": "text", "text": "These images depict two different landmarks. Can you identify them?", }, ], }, ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device) gen_tokens = model.generate( **inputs, max_new_tokens=300, do_sample=True, temperature=0.3, ) gen_text = processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True) print(gen_text) ``` - The example below demonstrates inference with batched inputs. ```py import torch from transformers import AutoProcessor, AutoModelForImageTextToText processor = AutoProcessor.from_pretrained(model_id) model = AutoModelForImageTextToText.from_pretrained( "CohereForAI/aya-vision-8b", device_map="auto", dtype=torch.float16 ) batch_messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, { "type": "text", "text": "These images depict two different landmarks. 
Can you identify them?", }, ], }, ], ] batch_inputs = processor.apply_chat_template( batch_messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(model.device) batch_outputs = model.generate( **batch_inputs, max_new_tokens=300, do_sample=True, temperature=0.3, ) for i, output in enumerate(batch_outputs): response = processor.tokenizer.decode( output[batch_inputs.input_ids.shape[1]:], skip_special_tokens=True ) print(f"Response {i+1}:\n{response}\n") ``` ## AyaVisionProcessor [[autodoc]] AyaVisionProcessor ## AyaVisionConfig [[autodoc]] AyaVisionConfig ## AyaVisionModel [[autodoc]] AyaVisionModel ## AyaVisionForConditionalGeneration [[autodoc]] AyaVisionForConditionalGeneration - forward
transformers/docs/source/en/model_doc/aya_vision.md/0
{ "file_path": "transformers/docs/source/en/model_doc/aya_vision.md", "repo_id": "transformers", "token_count": 4027 }
388
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2020-04-28 and added to Hugging Face Transformers on 2021-01-05.* # Blenderbot Small <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> Note that [`BlenderbotSmallModel`] and [`BlenderbotSmallForConditionalGeneration`] are only used in combination with the checkpoint [facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M). Larger Blenderbot checkpoints should instead be used with [`BlenderbotModel`] and [`BlenderbotForConditionalGeneration`] ## Overview The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://huggingface.co/papers/2004.13637) Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020. 
The abstract of the paper is the following: *Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI). ## Usage tips Blenderbot Small is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. 
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## BlenderbotSmallConfig [[autodoc]] BlenderbotSmallConfig ## BlenderbotSmallTokenizer [[autodoc]] BlenderbotSmallTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BlenderbotSmallTokenizerFast [[autodoc]] BlenderbotSmallTokenizerFast ## BlenderbotSmallModel [[autodoc]] BlenderbotSmallModel - forward ## BlenderbotSmallForConditionalGeneration [[autodoc]] BlenderbotSmallForConditionalGeneration - forward ## BlenderbotSmallForCausalLM [[autodoc]] BlenderbotSmallForCausalLM - forward
transformers/docs/source/en/model_doc/blenderbot-small.md/0
{ "file_path": "transformers/docs/source/en/model_doc/blenderbot-small.md", "repo_id": "transformers", "token_count": 1165 }
389
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-05-12 and added to Hugging Face Transformers on 2023-11-10.* # CLVP <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The CLVP (Contrastive Language-Voice Pretrained Transformer) model was proposed in [Better speech synthesis through scaling](https://huggingface.co/papers/2305.07243) by James Betker. The abstract from the paper is the following: *In recent years, the field of image generation has been revolutionized by the application of autoregressive transformers and DDPMs. These approaches model the process of image generation as a step-wise probabilistic processes and leverage large amounts of compute and data to learn the image distribution. This methodology of improving performance need not be confined to images. This paper describes a way to apply advances in the image generative domain to speech synthesis. The result is TorToise - an expressive, multi-voice text-to-speech system.* This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/neonbjb/tortoise-tts). ## Usage tips 1. 
CLVP is an integral part of the Tortoise TTS model. 2. CLVP can be used to compare different generated speech candidates with the provided text, and the best speech tokens are forwarded to the diffusion model. 3. The use of the [`ClvpModelForConditionalGeneration.generate()`] method is strongly recommended for tortoise usage. 4. Note that the CLVP model expects the audio to be sampled at 22.05 kHz contrary to other audio models which expects 16 kHz. ## Brief Explanation: - The [`ClvpTokenizer`] tokenizes the text input, and the [`ClvpFeatureExtractor`] extracts the log mel-spectrogram from the desired audio. - [`ClvpConditioningEncoder`] takes those text tokens and audio representations and converts them into embeddings conditioned on the text and audio. - The [`ClvpForCausalLM`] uses those embeddings to generate multiple speech candidates. - Each speech candidate is passed through the speech encoder ([`ClvpEncoder`]) which converts them into a vector representation, and the text encoder ([`ClvpEncoder`]) converts the text tokens into the same latent space. - At the end, we compare each speech vector with the text vector to see which speech vector is most similar to the text vector. - [`ClvpModelForConditionalGeneration.generate()`] compresses all of the logic described above into a single method. Example : ```python >>> import datasets >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). >>> text = "This is an example text." >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) >>> sample = ds[0]["audio"] >>> # Define processor and model. 
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") >>> # Generate processor output and model output. >>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") >>> generated_output = model.generate(**processor_output) ``` ## ClvpConfig [[autodoc]] ClvpConfig - from_sub_model_configs ## ClvpEncoderConfig [[autodoc]] ClvpEncoderConfig ## ClvpDecoderConfig [[autodoc]] ClvpDecoderConfig ## ClvpTokenizer [[autodoc]] ClvpTokenizer - save_vocabulary ## ClvpFeatureExtractor [[autodoc]] ClvpFeatureExtractor - __call__ ## ClvpProcessor [[autodoc]] ClvpProcessor - __call__ - decode - batch_decode ## ClvpModelForConditionalGeneration [[autodoc]] ClvpModelForConditionalGeneration - forward - generate - get_text_features - get_speech_features ## ClvpForCausalLM [[autodoc]] ClvpForCausalLM ## ClvpModel [[autodoc]] ClvpModel ## ClvpEncoder [[autodoc]] ClvpEncoder ## ClvpDecoder [[autodoc]] ClvpDecoder
transformers/docs/source/en/model_doc/clvp.md/0
{ "file_path": "transformers/docs/source/en/model_doc/clvp.md", "repo_id": "transformers", "token_count": 1436 }
390
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-03-29 and added to Hugging Face Transformers on 2022-05-18.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # Convolutional Vision Transformer (CvT) [Convolutional Vision Transformer (CvT)](https://huggingface.co/papers/2103.15808) is a model that combines the strengths of convolutional neural networks (CNNs) and Vision transformers for the computer vision tasks. It introduces convolutional layers into the vision transformer architecture, allowing it to capture local patterns in images while maintaining the global context provided by self-attention mechanisms. You can find all the CvT checkpoints under the [Microsoft](https://huggingface.co/microsoft?search_models=cvt) organization. > [!TIP] > This model was contributed by [anujunj](https://huggingface.co/anugunj). > > Click on the CvT models in the right sidebar for more examples of how to apply CvT to different computer vision tasks. The example below demonstrates how to classify an image with [`Pipeline`] or the [`AutoModel`] class. 
<hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipeline = pipeline( task="image-classification", model="microsoft/cvt-13", dtype=torch.float16, device=0 ) pipeline("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg") ``` </hfoption> <hfoption id="AutoModel"> ```py import torch import requests from PIL import Image from transformers import AutoModelForImageClassification, AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13") model = AutoModelForImageClassification.from_pretrained( "microsoft/cvt-13", dtype=torch.float16, device_map="auto" ) url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" image = Image.open(requests.get(url, stream=True).raw) inputs = image_processor(image, return_tensors="pt").to(model.device) with torch.no_grad(): logits = model(**inputs).logits predicted_class_id = logits.argmax(dim=-1).item() class_labels = model.config.id2label predicted_class_label = class_labels[predicted_class_id] print(f"The predicted class label is: {predicted_class_label}") ``` </hfoption> </hfoptions> ## Resources Refer to this set of ViT [notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) for examples of inference and fine-tuning on custom datasets. Replace [`ViTFeatureExtractor`] and [`ViTForImageClassification`] in these notebooks with [`AutoImageProcessor`] and [`CvtForImageClassification`]. ## CvtConfig [[autodoc]] CvtConfig ## CvtModel [[autodoc]] CvtModel - forward ## CvtForImageClassification [[autodoc]] CvtForImageClassification - forward
transformers/docs/source/en/model_doc/cvt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/cvt.md", "repo_id": "transformers", "token_count": 1175 }
391
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2024-01-19 and added to Hugging Face Transformers on 2024-01-25.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # Depth Anything [Depth Anything](https://huggingface.co/papers/2401.10891) is designed to be a foundation model for monocular depth estimation (MDE). It is jointly trained on labeled and ~62M unlabeled images to enhance the dataset. It uses a pretrained [DINOv2](./dinov2) model as an image encoder to inherit its existing rich semantic priors, and [DPT](./dpt) as the decoder. A teacher model is trained on unlabeled images to create pseudo-labels. The student model is trained on a combination of the pseudo-labels and labeled images. To improve the student model's performance, strong perturbations are added to the unlabeled images to challenge the student model to learn more visual knowledge from the image. You can find all the original Depth Anything checkpoints under the [Depth Anything](https://huggingface.co/collections/LiheYoung/depth-anything-release-65b317de04eec72abf6b55aa) collection. 
> [!TIP] > Click on the Depth Anything models in the right sidebar for more examples of how to apply Depth Anything to different vision tasks. The example below demonstrates how to obtain a depth map with [`Pipeline`] or the [`AutoModel`] class. <hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipe = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf", dtype=torch.bfloat16, device=0) pipe("http://images.cocodataset.org/val2017/000000039769.jpg")["depth"] ``` </hfoption> <hfoption id="AutoModel"> ```py import torch import requests import numpy as np from PIL import Image from transformers import AutoImageProcessor, AutoModelForDepthEstimation image_processor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-base-hf") model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-base-hf", dtype=torch.bfloat16) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = image_processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) post_processed_output = image_processor.post_process_depth_estimation( outputs, target_sizes=[(image.height, image.width)], ) predicted_depth = post_processed_output[0]["predicted_depth"] depth = (predicted_depth - predicted_depth.min()) / (predicted_depth.max() - predicted_depth.min()) depth = depth.detach().cpu().numpy() * 255 Image.fromarray(depth.astype("uint8")) ``` </hfoption> </hfoptions> ## Notes - [DepthAnythingV2](./depth_anything_v2), released in June 2024, uses the same architecture as Depth Anything and is compatible with all code examples and existing workflows. It uses synthetic data and a larger capacity teacher model to achieve much finer and robust depth predictions. ## DepthAnythingConfig [[autodoc]] DepthAnythingConfig ## DepthAnythingForDepthEstimation [[autodoc]] DepthAnythingForDepthEstimation - forward
transformers/docs/source/en/model_doc/depth_anything.md/0
{ "file_path": "transformers/docs/source/en/model_doc/depth_anything.md", "repo_id": "transformers", "token_count": 1174 }
392
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2025-06-06 and added to Hugging Face Transformers on 2025-06-25.* # dots.llm1 ## Overview The `dots.llm1` model was proposed in [dots.llm1 technical report](https://huggingface.co/papers/2506.05767) by rednote-hilab team. The abstract from the report is the following: *Mixture of Experts (MoE) models have emerged as a promising paradigm for scaling language models efficiently by activating only a subset of parameters for each input token. In this report, we present dots.llm1, a large-scale MoE model that activates 14B parameters out of a total of 142B parameters, delivering performance on par with state-of-the-art models while reducing training and inference costs. Leveraging our meticulously crafted and efficient data processing pipeline, dots.llm1 achieves performance comparable to Qwen2.5-72B after pretraining on high-quality corpus and post-training to fully unlock its capabilities. Notably, no synthetic data is used during pretraining. 
To foster further research, we open-source intermediate training checkpoints spanning the entire training process, providing valuable insights into the learning dynamics of large language models.* ## Dots1Config [[autodoc]] Dots1Config ## Dots1Model [[autodoc]] Dots1Model - forward ## Dots1ForCausalLM [[autodoc]] Dots1ForCausalLM - forward
transformers/docs/source/en/model_doc/dots1.md/0
{ "file_path": "transformers/docs/source/en/model_doc/dots1.md", "repo_id": "transformers", "token_count": 531 }
393
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-10-17 and added to Hugging Face Transformers on 2023-10-19.* # Fuyu <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The Fuyu model was created by [ADEPT](https://www.adept.ai/blog/fuyu-8b), and authored by Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, Sağnak Taşırlar. The authors introduced Fuyu-8B, a decoder-only multimodal model based on the classic transformers architecture, with query and key normalization. A linear encoder is added to create multimodal embeddings from image inputs. By treating image tokens like text tokens and using a special image-newline character, the model knows when an image line ends. Image positional embeddings are removed. This avoids the need for different training phases for various image resolutions. With 8 billion parameters and licensed under CC-BY-NC, Fuyu-8B is notable for its ability to handle both text and images, its impressive context size of 16K, and its overall performance. 
<Tip warning={true}> The `Fuyu` models were trained using `bfloat16`, but the original inference uses `float16` The checkpoints uploaded on the hub use `dtype = 'float16'` which will be used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`. The `dtype` of the online weights is mostly irrelevant, unless you are using `dtype="auto"` when initializing a model using `model = AutoModelForCausalLM.from_pretrained("path", dtype = "auto")`. The reason is that the model will first be downloaded ( using the `dtype` of the checkpoints online) then it will be cast to the default `dtype` of `torch` (becomes `torch.float32`). Users should specify the `dtype` they want, and if they don't it will be `torch.float32`. Finetuning the model in `float16` is not recommended and known to produce `nan`, as such the model should be fine-tuned in `bfloat16`. </Tip> Tips: - To convert the model, you need to clone the original repository using `git clone https://github.com/persimmon-ai-labs/adept-inference`, then get the checkpoints: ```bash git clone https://github.com/persimmon-ai-labs/adept-inference wget path/to/fuyu-8b-model-weights.tar tar -xvf fuyu-8b-model-weights.tar python src/transformers/models/fuyu/convert_fuyu_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path \ --pt_model_path /path/to/fuyu_8b_release/iter_0001251/mp_rank_00/model_optim_rng.pt --ada_lib_path /path/to/adept-inference ``` For the chat model: ```bash wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar tar -xvf 8b_base_model_release.tar ``` Then, model can be loaded via: ```py from transformers import FuyuConfig, FuyuForCausalLM model_config = FuyuConfig() model = FuyuForCausalLM(model_config).from_pretrained('/output/path') ``` Inputs need to be passed through a specific Processor to have the correct formats. 
A processor requires an image_processor and a tokenizer. Hence, inputs can be loaded via: ```py import io import requests from PIL import Image from transformers import AutoTokenizer from transformers.models.fuyu.processing_fuyu import FuyuProcessor from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor tokenizer = AutoTokenizer.from_pretrained('adept-hf-collab/fuyu-8b') image_processor = FuyuImageProcessor() processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer) text_prompt = "Generate a coco-style caption.\\n" bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content)) inputs_to_model = processor(images=bus_image_pil, text=text_prompt) ``` This model was contributed by [Molbap](https://huggingface.co/Molbap). The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference). - Fuyu uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer. The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece. - The authors suggest using the following prompt for image captioning: `f"Generate a coco-style caption.\\n"` ## FuyuConfig [[autodoc]] FuyuConfig ## FuyuModel [[autodoc]] FuyuModel ## FuyuForCausalLM [[autodoc]] FuyuForCausalLM - forward ## FuyuImageProcessor [[autodoc]] FuyuImageProcessor - __call__ ## FuyuProcessor [[autodoc]] FuyuProcessor - __call__
transformers/docs/source/en/model_doc/fuyu.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fuyu.md", "repo_id": "transformers", "token_count": 1755 }
394
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-03-21 and added to Hugging Face Transformers on 2021-03-30.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> </div> </div> ## GPT-Neo [GPT-Neo](https://zenodo.org/records/5297715) is an open-source alternative to GPT-2 and GPT-3 models, built with Mesh TensorFlow for TPUs. GPT-Neo uses local attention in every other layer for more efficiency. It is trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), a diverse dataset consisting of 22 smaller high-quality datasets. The original github repository can be found [here](https://github.com/EleutherAI/gpt-neo/tree/v1.1) You can find all the original GPT-Neo checkpoints under the [EleutherAI](https://huggingface.co/EleutherAI?search_models=gpt-neo) organization. > [!TIP] > Click on the GPT-Neo models in the right sidebar for more examples of how to apply GPT Neo to different language tasks. 
The example below demonstrates how to generate text with [`Pipeline`] or the [`AutoModel`], and from the command line. <hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipeline = pipeline(task="text-generation", model="EleutherAI/gpt-neo-1.3B", dtype=torch.float16, device=0) pipeline("Hello, I'm a language model") ``` </hfoption> <hfoption id="AutoModel"> ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2") tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B") input_ids = tokenizer("Hello, I'm a language model", return_tensors="pt").to(model.device) output = model.generate(**input_ids) print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` </hfoption> <hfoption id="transformers CLI"> ```bash echo -e "Hello, I'm a language model" | transformers-cli run --task text-generation --model EleutherAI/gpt-neo-1.3B --device 0 ``` </hfoption> </hfoptions> Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [bitsandbytes](../quantization/bitsandbytes) to only quantize the weights to 4-bits. 
```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="float16", bnb_4bit_use_double_quant=True ) model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-2.7B", quantization_config=quantization_config, device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B") inputs = tokenizer("Hello, I'm a language model", return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` ## Notes - Pad inputs on the right because GPT-Neo uses absolute position embeddings. ## GPTNeoConfig [[autodoc]] GPTNeoConfig ## GPTNeoModel [[autodoc]] GPTNeoModel - forward ## GPTNeoForCausalLM [[autodoc]] GPTNeoForCausalLM - forward ## GPTNeoForQuestionAnswering [[autodoc]] GPTNeoForQuestionAnswering - forward ## GPTNeoForSequenceClassification [[autodoc]] GPTNeoForSequenceClassification - forward ## GPTNeoForTokenClassification [[autodoc]] GPTNeoForTokenClassification - forward
transformers/docs/source/en/model_doc/gpt_neo.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt_neo.md", "repo_id": "transformers", "token_count": 1563 }
395
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2020-05-01 and added to Hugging Face Transformers on 2020-11-16.* # HerBERT <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The HerBERT model was proposed in [KLEJ: Comprehensive Benchmark for Polish Language Understanding](https://huggingface.co/papers/2005.00630) by Piotr Rybak, Robert Mroczkowski, Janusz Tracz, and Ireneusz Gawlik. It is a BERT-based Language Model trained on Polish Corpora using only MLM objective with dynamic masking of whole words. The abstract from the paper is the following: *In recent years, a series of Transformer-based models unlocked major improvements in general natural language understanding (NLU) tasks. Such a fast pace of research would not be possible without general NLU benchmarks, which allow for a fair comparison of the proposed methods. However, such benchmarks are available only for a handful of languages. To alleviate this issue, we introduce a comprehensive multi-task benchmark for the Polish language understanding, accompanied by an online leaderboard. 
It consists of a diverse set of tasks, adopted from existing datasets for named entity recognition, question-answering, textual entailment, and others. We also introduce a new sentiment analysis task for the e-commerce domain, named Allegro Reviews (AR). To ensure a common evaluation scheme and promote models that generalize to different NLU tasks, the benchmark includes datasets from varying domains and applications. Additionally, we release HerBERT, a Transformer-based model trained specifically for the Polish language, which has the best average performance and obtains the best results for three out of nine tasks. Finally, we provide an extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based models.* This model was contributed by [rmroczkowski](https://huggingface.co/rmroczkowski). The original code can be found [here](https://github.com/allegro/HerBERT). ## Usage example ```python >>> from transformers import HerbertTokenizer, RobertaModel >>> tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1") >>> model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1") >>> encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors="pt") >>> outputs = model(encoded_input) >>> # HerBERT can also be loaded using AutoTokenizer and AutoModel: >>> import torch >>> from transformers import AutoModel, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1") >>> model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1") ``` <Tip> Herbert implementation is the same as `BERT` except for the tokenization method. Refer to [BERT documentation](bert) for API reference and examples. </Tip> ## HerbertTokenizer [[autodoc]] HerbertTokenizer ## HerbertTokenizerFast [[autodoc]] HerbertTokenizerFast
transformers/docs/source/en/model_doc/herbert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/herbert.md", "repo_id": "transformers", "token_count": 1041 }
396
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-02-27 and added to Hugging Face Transformers on 2023-03-16.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="Tensor parallelism" src="https://img.shields.io/badge/Tensor%20parallelism-06b6d4?style=flat&logoColor=white"> </div> </div> # Llama [Llama](https://huggingface.co/papers/2302.13971) is a family of large language models ranging from 7B to 65B parameters. These models are focused on efficient inference (important for serving language models) by training a smaller model on more tokens rather than training a larger model on fewer tokens. The Llama model is based on the GPT architecture, but it uses pre-normalization to improve training stability, replaces ReLU with SwiGLU to improve performance, and replaces absolute positional embeddings with rotary positional embeddings (RoPE) to better handle longer sequence lengths. 
You can find all the original Llama checkpoints under the [Huggy Llama](https://huggingface.co/huggyllama) organization. > [!TIP] > Click on the Llama models in the right sidebar for more examples of how to apply Llama to different language tasks. The example below demonstrates how to generate text with [`Pipeline`] or the [`AutoModel`], and from the command line. <hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipeline = pipeline( task="text-generation", model="huggyllama/llama-7b", dtype=torch.float16, device=0 ) pipeline("Plants create energy through a process known as") ``` </hfoption> <hfoption id="AutoModel"> ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( "huggyllama/llama-7b", ) model = AutoModelForCausalLM.from_pretrained( "huggyllama/llama-7b", dtype=torch.float16, device_map="auto", attn_implementation="sdpa" ) input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device) output = model.generate(**input_ids, cache_implementation="static") print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` </hfoption> <hfoption id="transformers CLI"> ```bash echo -e "Plants create energy through a process known as" | transformers run --task text-generation --model huggyllama/llama-7b --device 0 ``` </hfoption> </hfoptions> Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4. 
```py # pip install torchao import torch from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer quantization_config = TorchAoConfig("int4_weight_only", group_size=128) model = AutoModelForCausalLM.from_pretrained( "huggyllama/llama-30b", dtype=torch.bfloat16, device_map="auto", quantization_config=quantization_config ) tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-30b") input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device) output = model.generate(**input_ids, cache_implementation="static") print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to. ```py from transformers.utils.attention_visualizer import AttentionMaskVisualizer visualizer = AttentionMaskVisualizer("huggyllama/llama-7b") visualizer("Plants create energy through a process known as") ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llama-attn-mask.png"/> </div> ## Notes - The tokenizer is a byte-pair encoding model based on [SentencePiece](https://github.com/google/sentencepiece). During decoding, if the first token is the start of the word (for example, "Banana"), the tokenizer doesn't prepend the prefix space to the string. 
## LlamaConfig [[autodoc]] LlamaConfig ## LlamaTokenizer [[autodoc]] LlamaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LlamaTokenizerFast [[autodoc]] LlamaTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## LlamaModel [[autodoc]] LlamaModel - forward ## LlamaForCausalLM [[autodoc]] LlamaForCausalLM - forward ## LlamaForSequenceClassification [[autodoc]] LlamaForSequenceClassification - forward ## LlamaForQuestionAnswering [[autodoc]] LlamaForQuestionAnswering - forward ## LlamaForTokenClassification [[autodoc]] LlamaForTokenClassification - forward
transformers/docs/source/en/model_doc/llama.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llama.md", "repo_id": "transformers", "token_count": 2040 }
397
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2018-04-01 and added to Hugging Face Transformers on 2020-11-16.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # MarianMT [MarianMT](https://huggingface.co/papers/1804.00344) is a machine translation model trained with the Marian framework which is written in pure C++. The framework includes its own custom auto-differentiation engine and efficient meta-algorithms to train encoder-decoder models like BART. All MarianMT models are transformer encoder-decoders with 6 layers in each component, use static sinusoidal positional embeddings, don't have a layernorm embedding, and the model starts generating with the prefix `pad_token_id` instead of `<s/>`. 
You can find all the original MarianMT checkpoints under the [Language Technology Research Group at the University of Helsinki](https://huggingface.co/Helsinki-NLP/models?search=opus-mt) organization. > [!TIP] > This model was contributed by [sshleifer](https://huggingface.co/sshleifer). > > Click on the MarianMT models in the right sidebar for more examples of how to apply MarianMT to translation tasks. The example below demonstrates how to translate text using [`Pipeline`] or the [`AutoModel`] class. <hfoptions id="usage"> <hfoption id="Pipeline"> ```python import torch from transformers import pipeline pipeline = pipeline("translation_en_to_de", model="Helsinki-NLP/opus-mt-en-de", dtype=torch.float16, device=0) pipeline("Hello, how are you?") ``` </hfoption> <hfoption id="AutoModel"> ```python import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-de", dtype=torch.float16, attn_implementation="sdpa", device_map="auto") inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device) outputs = model.generate(**inputs, cache_implementation="static") print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` </hfoption> </hfoptions> Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to. 
```python from transformers.utils.attention_visualizer import AttentionMaskVisualizer visualizer = AttentionMaskVisualizer("Helsinki-NLP/opus-mt-en-de") visualizer("Hello, how are you?") ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/marianmt-attn-mask.png"/> </div> ## Notes - MarianMT models are ~298MB on disk and there are more than 1000 models. Check this [list](https://huggingface.co/Helsinki-NLP) for supported language pairs. The language codes may be inconsistent. Two digit codes can be found [here](https://developers.google.com/admin-sdk/directory/v1/languages) while three digit codes may require further searching. - Models that require BPE preprocessing are not supported. - All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`. Language codes formatted like `es_AR` usually refer to the `code_{region}`. For example, `es_AR` refers to Spanish from Argentina. - If a model can output multiple languages, prepend the desired output language to `src_txt` as shown below. New multilingual models from the [Tatoeba-Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge) require 3 character language codes. 
```python from transformers import MarianMTModel, MarianTokenizer # Model trained on multiple source languages โ†’ multiple target languages # Example: multilingual to Arabic (arb) model_name = "Helsinki-NLP/opus-mt-mul-mul" # Tatoeba Challenge model tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) # Prepend the desired output language code (3-letter ISO 639-3) src_texts = ["arb>> Hello, how are you today?"] # Tokenize and translate inputs = tokenizer(src_texts, return_tensors="pt", padding=True, truncation=True) translated = model.generate(**inputs) # Decode and print result translated_texts = tokenizer.batch_decode(translated, skip_special_tokens=True) print(translated_texts[0]) ``` - Older multilingual models use 2 character language codes. ```python from transformers import MarianMTModel, MarianTokenizer # Example: older multilingual model (like en โ†’ many) model_name = "Helsinki-NLP/opus-mt-en-ROMANCE" # English โ†’ French, Spanish, Italian, etc. tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) # Prepend the 2-letter ISO 639-1 target language code (older format) src_texts = [">>fr<< Hello, how are you today?"] # Tokenize and translate inputs = tokenizer(src_texts, return_tensors="pt", padding=True, truncation=True) translated = model.generate(**inputs) # Decode and print result translated_texts = tokenizer.batch_decode(translated, skip_special_tokens=True) print(translated_texts[0]) ``` ## MarianConfig [[autodoc]] MarianConfig ## MarianTokenizer [[autodoc]] MarianTokenizer - build_inputs_with_special_tokens ## MarianModel [[autodoc]] MarianModel - forward ## MarianMTModel [[autodoc]] MarianMTModel - forward ## MarianForCausalLM [[autodoc]] MarianForCausalLM - forward
transformers/docs/source/en/model_doc/marian.md/0
{ "file_path": "transformers/docs/source/en/model_doc/marian.md", "repo_id": "transformers", "token_count": 2085 }
398
<!--Copyright 2023 Mistral AI and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-12-11 and added to Hugging Face Transformers on 2023-12-11.* # Mixtral <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="Tensor parallelism" src="https://img.shields.io/badge/Tensor%20parallelism-06b6d4?style=flat&logoColor=white"> </div> ## Overview [Mixtral-8x7B](https://huggingface.co/papers/2401.04088) was introduced in the [Mixtral of Experts blogpost](https://mistral.ai/news/mixtral-of-experts/) by Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lรฉlio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothรฉe Lacroix, William El Sayed. 
The introduction of the blog post says: *Today, the team is proud to release Mixtral 8x7B, a high-quality sparse mixture of experts models (SMoE) with open weights. Licensed under Apache 2.0. Mixtral outperforms Llama 2 70B on most benchmarks with 6x faster inference. It is the strongest open-weight model with a permissive license and the best model overall regarding cost/performance trade-offs. In particular, it matches or outperforms GPT3.5 on most standard benchmarks.* Mixtral-8x7B is the second large language model (LLM) released by [mistral.ai](https://mistral.ai/), after [Mistral-7B](mistral). ### Architectural details Mixtral-8x7B is a decoder-only Transformer with the following architectural choices: - Mixtral is a Mixture of Experts (MoE) model with 8 experts per MLP, with a total of 45 billion parameters. To learn more about mixture-of-experts, refer to the [blog post](https://huggingface.co/blog/moe). - Despite the model having 45 billion parameters, the compute required for a single forward pass is the same as that of a 14 billion parameter model. This is because even though each of the experts have to be loaded in RAM (70B like ram requirement) each token from the hidden states are dispatched twice (top 2 routing) and thus the compute (the operation required at each forward computation) is just 2 X sequence_length. The following implementation details are shared with Mistral AI's first model [Mistral-7B](mistral): - Sliding Window Attention - Trained with 8k context length and fixed cache size, with a theoretical attention span of 128K tokens - GQA (Grouped Query Attention) - allowing faster inference and lower cache size. - Byte-fallback BPE tokenizer - ensures that characters are never mapped to out of vocabulary tokens. For more details refer to the [release blog post](https://mistral.ai/news/mixtral-of-experts/). ### License `Mixtral-8x7B` is released under the Apache 2.0 license. 
## Usage tips The Mistral team has released 2 checkpoints: - a base model, [Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1), which has been pre-trained to predict the next token on internet-scale data. - an instruction tuned model, [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1), which is the base model optimized for chat purposes using supervised fine-tuning (SFT) and direct preference optimization (DPO). The base model can be used as follows: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> prompt = "My favourite condiment is" >>> model_inputs = tokenizer([prompt], return_tensors="pt").to(model.device) >>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "My favourite condiment is to ..." ``` The instruction tuned model can be used as follows: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1") >>> messages = [ ... {"role": "user", "content": "What is your favourite condiment?"}, ... {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, ... {"role": "user", "content": "Do you have mayonnaise recipes?"} ... 
] >>> model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device) >>> generated_ids = model.generate(model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "Mayonnaise can be made as follows: (...)" ``` As can be seen, the instruction-tuned model requires a [chat template](../chat_templating) to be applied to make sure the inputs are prepared in the right format. ## Speeding up Mixtral by using Flash Attention The code snippets above showcase inference without any optimization tricks. However, one can drastically speed up the model by leveraging [Flash Attention](../perf_train_gpu_one#flash-attention-2), which is a faster implementation of the attention mechanism used inside the model. First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature. ```bash pip install -U flash-attn --no-build-isolation ``` Make also sure that you have a hardware that is compatible with Flash-Attention 2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). Make also sure to load your model in half-precision (e.g. 
`torch.float16`) To load and run a model using Flash Attention-2, refer to the snippet below: ```python >>> import torch >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1", dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1") >>> prompt = "My favourite condiment is" >>> model_inputs = tokenizer([prompt], return_tensors="pt").to(model.device) >>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "The expected output" ``` ### Expected speedups Below is a expected speedup diagram that compares pure inference time between the native implementation in transformers using `mistralai/Mixtral-8x7B-v0.1` checkpoint and the Flash Attention 2 version of the model. <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/mixtral-7b-inference-large-seqlen.png"> </div> ### Sliding window Attention The current implementation supports the sliding window attention mechanism and memory efficient cache management. To enable sliding window attention, just make sure to have a `flash-attn` version that is compatible with sliding window attention (`>=2.3.0`). The Flash Attention-2 model uses also a more memory efficient cache slicing mechanism - as recommended per the official implementation of Mistral model that use rolling cache mechanism we keep the cache size fixed (`self.config.sliding_window`), support batched generation only for `padding_side="left"` and use the absolute position of the current token to compute the positional embedding. ## Shrinking down Mixtral using quantization As the Mixtral model has 45 billion parameters, that would require about 90GB of GPU RAM in half precision (float16), since each parameter is stored in 2 bytes. 
However, one can shrink down the size of the model using [quantization](../quantization). If the model is quantized to 4 bits (or half a byte per parameter), a single A100 with 40GB of RAM is enough to fit the entire model, as in that case only about 27 GB of RAM is required. Quantizing a model is as simple as passing a `quantization_config` to the model. Below, we'll leverage the bitsandbytes quantization library (but refer to [this page](../quantization) for alternative quantization methods): ```python >>> import torch >>> from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig >>> # specify how to quantize the model >>> quantization_config = BitsAndBytesConfig( ... load_in_4bit=True, ... bnb_4bit_quant_type="nf4", ... bnb_4bit_compute_dtype=torch.float16, ... ) >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", quantization_config=quantization_config, device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1") >>> prompt = "My favourite condiment is" >>> messages = [ ... {"role": "user", "content": "What is your favourite condiment?"}, ... {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, ... {"role": "user", "content": "Do you have mayonnaise recipes?"} ... ] >>> model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device) >>> generated_ids = model.generate(model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "The expected output" ``` This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArthurZ) . The original code can be found [here](https://github.com/mistralai/mistral-src). 
## Resources A list of official Hugging Face and community (indicated by ๐ŸŒŽ) resources to help you get started with Mixtral. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-generation"/> - A demo notebook to perform supervised fine-tuning (SFT) of Mixtral-8x7B can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Mistral/Supervised_fine_tuning_(SFT)_of_an_LLM_using_Hugging_Face_tooling.ipynb). ๐ŸŒŽ - A [blog post](https://medium.com/@prakharsaxena11111/finetuning-mixtral-7bx8-6071b0ebf114) on fine-tuning Mixtral-8x7B using PEFT. ๐ŸŒŽ - The [Alignment Handbook](https://github.com/huggingface/alignment-handbook) by Hugging Face includes scripts and recipes to perform supervised fine-tuning (SFT) and direct preference optimization with Mistral-7B. This includes scripts for full fine-tuning, QLoRa on a single accelerator as well as multi-accelerator fine-tuning. - [Causal language modeling task guide](../tasks/language_modeling) ## MixtralConfig [[autodoc]] MixtralConfig ## MistralCommonTokenizer [[autodoc]] MistralCommonTokenizer ## MixtralModel [[autodoc]] MixtralModel - forward ## MixtralForCausalLM [[autodoc]] MixtralForCausalLM - forward ## MixtralForSequenceClassification [[autodoc]] MixtralForSequenceClassification - forward ## MixtralForTokenClassification [[autodoc]] MixtralForTokenClassification - forward ## MixtralForQuestionAnswering [[autodoc]] MixtralForQuestionAnswering - forward
transformers/docs/source/en/model_doc/mixtral.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mixtral.md", "repo_id": "transformers", "token_count": 3745 }
399
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-05-05 and added to Hugging Face Transformers on 2023-07-25.* # MPT <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The MPT model was proposed by the [MosaicML](https://www.mosaicml.com/) team and released with multiple sizes and finetuned variants. The MPT models are a series of open source and commercially usable LLMs pre-trained on 1T tokens. MPT models are GPT-style decoder-only transformers with several improvements: performance-optimized layer implementations, architecture changes that provide greater training stability, and the elimination of context length limits by replacing positional embeddings with ALiBi. - MPT base: MPT base pre-trained models on next token prediction - MPT instruct: MPT base models fine-tuned on instruction based tasks - MPT storywriter: MPT base models fine-tuned for 2500 steps on 65k-token excerpts of fiction books contained in the books3 corpus, this enables the model to handle very long sequences The original code is available at the [`llm-foundry`](https://github.com/mosaicml/llm-foundry/tree/main) repository. 
Read more about it [in the release blogpost](https://www.mosaicml.com/blog/mpt-7b) ## Usage tips - Learn more about some techniques behind training of the model [in this section of llm-foundry repository](https://github.com/mosaicml/llm-foundry/blob/main/TUTORIAL.md#faqs) - If you want to use the advanced version of the model (triton kernels, direct flash attention integration), you can still use the original model implementation by adding `trust_remote_code=True` when calling `from_pretrained`. ## Resources - [Fine-tuning Notebook](https://colab.research.google.com/drive/1HCpQkLL7UXW8xJUJJ29X7QAeNJKO0frZ?usp=sharing) on how to fine-tune MPT-7B on a free Google Colab instance to turn the model into a Chatbot. ## MptConfig [[autodoc]] MptConfig - all ## MptModel [[autodoc]] MptModel - forward ## MptForCausalLM [[autodoc]] MptForCausalLM - forward ## MptForSequenceClassification [[autodoc]] MptForSequenceClassification - forward ## MptForTokenClassification [[autodoc]] MptForTokenClassification - forward ## MptForQuestionAnswering [[autodoc]] MptForQuestionAnswering - forward
transformers/docs/source/en/model_doc/mpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mpt.md", "repo_id": "transformers", "token_count": 920 }
400
<!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2024-09-03 and added to Hugging Face Transformers on 2024-09-03.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # OLMoE [OLMoE](https://huggingface.co/papers/2409.02060) is a sparse Mixture-of-Experts (MoE) language model with 7B parameters but only 1B parameters are used per input token. It has similar inference costs as dense models but trains ~3x faster. OLMoE uses fine-grained routing with 64 small experts in each layer and uses a dropless token-based routing algorithm. You can find all the original OLMoE checkpoints under the [OLMoE](https://huggingface.co/collections/allenai/olmoe-november-2024-66cf678c047657a30c8cd3da) collection. > [!TIP] > This model was contributed by [Muennighoff](https://huggingface.co/Muennighoff). > > Click on the OLMoE models in the right sidebar for more examples of how to apply OLMoE to different language tasks. 
The example below demonstrates how to generate text with [`Pipeline`] or the [`AutoModel`] class. <hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipe = pipeline( task="text-generation", model="allenai/OLMoE-1B-7B-0125", dtype=torch.float16, device=0, ) result = pipe("Dionysus is the god of") print(result) ``` </hfoption> <hfoption id="AutoModel"> ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device device = infer_device() model = AutoModelForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924", attn_implementation="sdpa", dtype="auto", device_map="auto") tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924") inputs = tokenizer("Bitcoin is", return_tensors="pt") inputs = {k: v.to(device) for k, v in inputs.items()} output = model.generate(**inputs, max_length=64) print(tokenizer.decode(output[0])) ``` </hfoption> </hfoptions> ## Quantization Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [bitsandbytes](../quantization/bitsandbytes) to only quantize the weights to 4-bits. 
```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, infer_device device = infer_device() quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4" ) model = AutoModelForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924", attn_implementation="sdpa", dtype="auto", device_map="auto", quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924") inputs = tokenizer("Bitcoin is", return_tensors="pt") inputs = {k: v.to(device) for k, v in inputs.items()} output = model.generate(**inputs, max_length=64) print(tokenizer.decode(output[0])) ``` ## OlmoeConfig [[autodoc]] OlmoeConfig ## OlmoeModel [[autodoc]] OlmoeModel - forward ## OlmoeForCausalLM [[autodoc]] OlmoeForCausalLM - forward
transformers/docs/source/en/model_doc/olmoe.md/0
{ "file_path": "transformers/docs/source/en/model_doc/olmoe.md", "repo_id": "transformers", "token_count": 1418 }
401
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2023-09-07 and added to Hugging Face Transformers on 2023-09-12.* # Persimmon <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The Persimmon model was created by [ADEPT](https://www.adept.ai/blog/persimmon-8b), and authored by Erich Elsen, Augustus Odena, Maxwell Nye, SaฤŸnak TaลŸฤฑrlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. The authors introduced Persimmon-8B, a decoder model based on the classic transformers architecture, with query and key normalization. Persimmon-8B is a fully permissively-licensed model with approximately 8 billion parameters, released under the Apache license. Some of the key attributes of Persimmon-8B are long context size (16K), performance, and capabilities for multimodal extensions. The authors showcase their approach to model evaluation, focusing on practical text generation, mirroring how users interact with language models. The work also includes a comparative analysis, pitting Persimmon-8B against other prominent models (MPT 7B Instruct and Llama 2 Base 7B 1-Shot), across various evaluation tasks. 
The results demonstrate Persimmon-8B's competitive performance, even with limited training data. In terms of model details, the work outlines the architecture and training methodology of Persimmon-8B, providing insights into its design choices, sequence length, and dataset composition. The authors present a fast inference code that outperforms traditional implementations through operator fusion and CUDA graph utilization while maintaining code coherence. They express their anticipation of how the community will leverage this contribution to drive innovation, hinting at further upcoming releases as part of an ongoing series of developments. This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference). ## Usage tips <Tip warning={true}> The `Persimmon` models were trained using `bfloat16`, but the original inference uses `float16` The checkpoints uploaded on the hub use `dtype = 'float16'` which will be used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`. The `dtype` of the online weights is mostly irrelevant, unless you are using `dtype="auto"` when initializing a model using `model = AutoModelForCausalLM.from_pretrained("path", dtype = "auto")`. The reason is that the model will first be downloaded ( using the `dtype` of the checkpoints online) then it will be cast to the default `dtype` of `torch` (becomes `torch.float32`). Users should specify the `dtype` they want, and if they don't it will be `torch.float32`. Finetuning the model in `float16` is not recommended and known to produce `nan`, as such the model should be fine-tuned in `bfloat16`. 
</Tip> Tips: - To convert the model, you need to clone the original repository using `git clone https://github.com/persimmon-ai-labs/adept-inference`, then get the checkpoints: ```bash git clone https://github.com/persimmon-ai-labs/adept-inference wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_base_model_release.tar tar -xvf 8b_base_model_release.tar python src/transformers/models/persimmon/convert_persimmon_weights_to_hf.py --input_dir /path/to/downloaded/persimmon/weights/ --output_dir /output/path \ --pt_model_path /path/to/8b_chat_model_release/iter_0001251/mp_rank_00/model_optim_rng.pt --ada_lib_path /path/to/adept-inference ``` For the chat model: ```bash wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar tar -xvf 8b_chat_model_release.tar ``` Thereafter, models can be loaded via: ```py from transformers import PersimmonForCausalLM, PersimmonTokenizer model = PersimmonForCausalLM.from_pretrained("/output/path") tokenizer = PersimmonTokenizer.from_pretrained("/output/path") ``` - Persimmon uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer. The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece. The `chat` template will be updated with the templating functions in a follow up PR! - The authors suggest to use the following prompt format for the chat mode: `f"human: {prompt}\n\nadept:"` ## PersimmonConfig [[autodoc]] PersimmonConfig ## PersimmonModel [[autodoc]] PersimmonModel - forward ## PersimmonForCausalLM [[autodoc]] PersimmonForCausalLM - forward ## PersimmonForSequenceClassification [[autodoc]] PersimmonForSequenceClassification - forward ## PersimmonForTokenClassification [[autodoc]] PersimmonForTokenClassification - forward
transformers/docs/source/en/model_doc/persimmon.md/0
{ "file_path": "transformers/docs/source/en/model_doc/persimmon.md", "repo_id": "transformers", "token_count": 1679 }
402
<!--Copyright 2024 The Qwen Team and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2024-07-15 and added to Hugging Face Transformers on 2024-01-17.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="Tensor parallelism" src="https://img.shields.io/badge/Tensor%20parallelism-06b6d4?style=flat&logoColor=white"> </div> </div> # Qwen2 [Qwen2](https://huggingface.co/papers/2407.10671) is a family of large language models (pretrained, instruction-tuned and mixture-of-experts) available in sizes from 0.5B to 72B parameters. The models are built on the Transformer architecture featuring enhancements like group query attention (GQA), rotary positional embeddings (RoPE), a mix of sliding window and full attention, and dual chunk attention with YARN for training stability. Qwen2 models support multiple languages and context lengths up to 131,072 tokens. 
You can find all the official Qwen2 checkpoints under the [Qwen2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f) collection. > [!TIP] > Click on the Qwen2 models in the right sidebar for more examples of how to apply Qwen2 to different language tasks. The example below demonstrates how to generate text with [`Pipeline`], [`AutoModel`], and from the command line using the instruction-tuned models. <hfoptions id="usage"> <hfoption id="Pipeline"> ```python import torch from transformers import pipeline pipe = pipeline( task="text-generation", model="Qwen/Qwen2-1.5B-Instruct", dtype=torch.bfloat16, device_map=0 ) messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me about the Qwen2 model family."}, ] outputs = pipe(messages, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"][-1]['content']) ``` </hfoption> <hfoption id="AutoModel"> ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained( "Qwen/Qwen2-1.5B-Instruct", dtype=torch.bfloat16, device_map="auto", attn_implementation="sdpa" ) tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-1.5B-Instruct") prompt = "Give me a short introduction to large language models." 
messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt} ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) model_inputs = tokenizer([text], return_tensors="pt").to(model.device) generated_ids = model.generate( model_inputs.input_ids, cache_implementation="static", max_new_tokens=512, do_sample=True, temperature=0.7, top_k=50, top_p=0.95 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] print(response) ``` </hfoption> <hfoption id="transformers CLI"> ```bash # pip install -U flash-attn --no-build-isolation transformers chat Qwen/Qwen2-7B-Instruct --dtype auto --attn_implementation flash_attention_2 --device 0 ``` </hfoption> </hfoptions> Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends. The example below uses [bitsandbytes](../quantization/bitsandbytes) to quantize the weights to 4-bits. 
```python # pip install -U flash-attn --no-build-isolation import torch from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, ) tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B") model = AutoModelForCausalLM.from_pretrained( "Qwen/Qwen2-7B", dtype=torch.bfloat16, device_map="auto", quantization_config=quantization_config, attn_implementation="flash_attention_2" ) inputs = tokenizer("The Qwen2 model family is", return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` ## Notes - Ensure your Transformers library version is up-to-date. Qwen2 requires Transformers>=4.37.0 for full support. ## Qwen2Config [[autodoc]] Qwen2Config ## Qwen2Tokenizer [[autodoc]] Qwen2Tokenizer - save_vocabulary ## Qwen2TokenizerFast [[autodoc]] Qwen2TokenizerFast ## Qwen2RMSNorm [[autodoc]] Qwen2RMSNorm - forward ## Qwen2Model [[autodoc]] Qwen2Model - forward ## Qwen2ForCausalLM [[autodoc]] Qwen2ForCausalLM - forward ## Qwen2ForSequenceClassification [[autodoc]] Qwen2ForSequenceClassification - forward ## Qwen2ForTokenClassification [[autodoc]] Qwen2ForTokenClassification - forward ## Qwen2ForQuestionAnswering [[autodoc]] Qwen2ForQuestionAnswering - forward
transformers/docs/source/en/model_doc/qwen2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/qwen2.md", "repo_id": "transformers", "token_count": 2166 }
403
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2019-04-01 and added to Hugging Face Transformers on 2022-12-19.* # RoBERTa-PreLayerNorm <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The RoBERTa-PreLayerNorm model was proposed in [fairseq: A Fast, Extensible Toolkit for Sequence Modeling](https://huggingface.co/papers/1904.01038) by Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, Michael Auli. It is identical to using the `--encoder-normalize-before` flag in [fairseq](https://fairseq.readthedocs.io/). The abstract from the paper is the following: *fairseq is an open-source sequence modeling toolkit that allows researchers and developers to train custom models for translation, summarization, language modeling, and other text generation tasks. The toolkit is based on PyTorch and supports distributed training across multiple GPUs and machines. We also support fast mixed-precision training and inference on modern GPUs.* This model was contributed by [andreasmaden](https://huggingface.co/andreasmadsen). The original code can be found [here](https://github.com/princeton-nlp/DinkyTrain). 
## Usage tips - The implementation is the same as [Roberta](roberta) except instead of using _Add and Norm_ it does _Norm and Add_. _Add_ and _Norm_ refers to the Addition and LayerNormalization as described in [Attention Is All You Need](https://huggingface.co/papers/1706.03762). - This is identical to using the `--encoder-normalize-before` flag in [fairseq](https://fairseq.readthedocs.io/). ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## RobertaPreLayerNormConfig [[autodoc]] RobertaPreLayerNormConfig ## RobertaPreLayerNormModel [[autodoc]] RobertaPreLayerNormModel - forward ## RobertaPreLayerNormForCausalLM [[autodoc]] RobertaPreLayerNormForCausalLM - forward ## RobertaPreLayerNormForMaskedLM [[autodoc]] RobertaPreLayerNormForMaskedLM - forward ## RobertaPreLayerNormForSequenceClassification [[autodoc]] RobertaPreLayerNormForSequenceClassification - forward ## RobertaPreLayerNormForMultipleChoice [[autodoc]] RobertaPreLayerNormForMultipleChoice - forward ## RobertaPreLayerNormForTokenClassification [[autodoc]] RobertaPreLayerNormForTokenClassification - forward ## RobertaPreLayerNormForQuestionAnswering [[autodoc]] RobertaPreLayerNormForQuestionAnswering - forward
transformers/docs/source/en/model_doc/roberta-prelayernorm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/roberta-prelayernorm.md", "repo_id": "transformers", "token_count": 1037 }
404
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-09-14 and added to Hugging Face Transformers on 2021-10-15.* # SEW-D <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://huggingface.co/papers/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. The abstract from the paper is the following: *This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. 
For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.* This model was contributed by [anton-l](https://huggingface.co/anton-l). ## Usage tips - SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - SEWDForCTC is fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. ## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## SEWDConfig [[autodoc]] SEWDConfig ## SEWDModel [[autodoc]] SEWDModel - forward ## SEWDForCTC [[autodoc]] SEWDForCTC - forward ## SEWDForSequenceClassification [[autodoc]] SEWDForSequenceClassification - forward
transformers/docs/source/en/model_doc/sew-d.md/0
{ "file_path": "transformers/docs/source/en/model_doc/sew-d.md", "repo_id": "transformers", "token_count": 827 }
405
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the MIT License; you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2017-12-20 and added to Hugging Face Transformers on 2024-03-19.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white" > </div> </div> # SuperPoint [SuperPoint](https://huggingface.co/papers/1712.07629) is the result of self-supervised training of a fully-convolutional network for interest point detection and description. The model is able to detect interest points that are repeatable under homographic transformations and provide a descriptor for each point. Usage on its own is limited, but it can be used as a feature extractor for other tasks such as homography estimation and image matching. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/superpoint_architecture.png" alt="drawing" width="500"/> You can find all the original SuperPoint checkpoints under the [Magic Leap Community](https://huggingface.co/magic-leap-community) organization. > [!TIP] > This model was contributed by [stevenbucaille](https://huggingface.co/stevenbucaille). > > Click on the SuperPoint models in the right sidebar for more examples of how to apply SuperPoint to different computer vision tasks. 
The example below demonstrates how to detect interest points in an image with the [`AutoModel`] class. <hfoptions id="usage"> <hfoption id="AutoModel"> ```py from transformers import AutoImageProcessor, SuperPointForKeypointDetection import torch from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint") inputs = processor(image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) # Post-process to get keypoints, scores, and descriptors image_size = (image.height, image.width) processed_outputs = processor.post_process_keypoint_detection(outputs, [image_size]) ``` </hfoption> </hfoptions> ## Notes - SuperPoint outputs a dynamic number of keypoints per image, which makes it suitable for tasks requiring variable-length feature representations. 
```py from transformers import AutoImageProcessor, SuperPointForKeypointDetection import torch from PIL import Image import requests processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint") url_image_1 = "http://images.cocodataset.org/val2017/000000039769.jpg" image_1 = Image.open(requests.get(url_image_1, stream=True).raw) url_image_2 = "http://images.cocodataset.org/test-stuff2017/000000000568.jpg" image_2 = Image.open(requests.get(url_image_2, stream=True).raw) images = [image_1, image_2] inputs = processor(images, return_tensors="pt") # Example of handling dynamic keypoint output outputs = model(**inputs) keypoints = outputs.keypoints # Shape varies per image scores = outputs.scores # Confidence scores for each keypoint descriptors = outputs.descriptors # 256-dimensional descriptors mask = outputs.mask # Value of 1 corresponds to a keypoint detection ``` - The model provides both keypoint coordinates and their corresponding descriptors (256-dimensional vectors) in a single forward pass. - For batch processing with multiple images, you need to use the mask attribute to retrieve the respective information for each image. You can use the `post_process_keypoint_detection` from the `SuperPointImageProcessor` to retrieve each image's information. 
```py # Batch processing example images = [image1, image2, image3] inputs = processor(images, return_tensors="pt") outputs = model(**inputs) image_sizes = [(img.height, img.width) for img in images] processed_outputs = processor.post_process_keypoint_detection(outputs, image_sizes) ``` - You can then print the keypoints on the image of your choice to visualize the result: ```py import matplotlib.pyplot as plt plt.axis("off") plt.imshow(image_1) plt.scatter( processed_outputs[0]["keypoints"][:, 0], processed_outputs[0]["keypoints"][:, 1], c=processed_outputs[0]["scores"] * 100, s=processed_outputs[0]["scores"] * 50, alpha=0.8 ) plt.savefig("output_image.png") ``` <div class="flex justify-center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/ZtFmphEhx8tcbEQqOolyE.png"> </div> ## Resources - Refer to this [notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SuperPoint/Inference_with_SuperPoint_to_detect_interest_points_in_an_image.ipynb) for an inference and visualization example. ## SuperPointConfig [[autodoc]] SuperPointConfig ## SuperPointImageProcessor [[autodoc]] SuperPointImageProcessor - preprocess ## SuperPointImageProcessorFast [[autodoc]] SuperPointImageProcessorFast - preprocess - post_process_keypoint_detection ## SuperPointForKeypointDetection [[autodoc]] SuperPointForKeypointDetection - forward
transformers/docs/source/en/model_doc/superpoint.md/0
{ "file_path": "transformers/docs/source/en/model_doc/superpoint.md", "repo_id": "transformers", "token_count": 1884 }
406
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # TimmWrapper <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview Helper class to enable loading timm models to be used with the transformers library and its autoclasses. ```python >>> import torch >>> from PIL import Image >>> from urllib.request import urlopen >>> from transformers import AutoModelForImageClassification, AutoImageProcessor >>> # Load image >>> image = Image.open(urlopen( ... 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' ... )) >>> # Load model and image processor >>> checkpoint = "timm/resnet50.a1_in1k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) >>> model = AutoModelForImageClassification.from_pretrained(checkpoint).eval() >>> # Preprocess image >>> inputs = image_processor(image) >>> # Forward pass >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # Get top 5 predictions >>> top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5) ``` ## Resources: A list of official Hugging Face and community (indicated by ๐ŸŒŽ) resources to help you get started with TimmWrapper. 
<PipelineTag pipeline="image-classification"/> - [Collection of Example Notebook](https://github.com/ariG23498/timm-wrapper-examples) ๐ŸŒŽ > [!TIP] > For a more detailed overview please read the [official blog post](https://huggingface.co/blog/timm-transformers) on the timm integration. ## TimmWrapperConfig [[autodoc]] TimmWrapperConfig ## TimmWrapperImageProcessor [[autodoc]] TimmWrapperImageProcessor - preprocess ## TimmWrapperModel [[autodoc]] TimmWrapperModel - forward ## TimmWrapperForImageClassification [[autodoc]] TimmWrapperForImageClassification - forward
transformers/docs/source/en/model_doc/timm_wrapper.md/0
{ "file_path": "transformers/docs/source/en/model_doc/timm_wrapper.md", "repo_id": "transformers", "token_count": 800 }
407
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2021-02-05 and added to Hugging Face Transformers on 2022-01-19.* # ViLT <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The ViLT model was proposed in [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://huggingface.co/papers/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. ViLT incorporates text embeddings into a Vision Transformer (ViT), allowing it to have a minimal design for Vision-and-Language Pre-training (VLP). The abstract from the paper is the following: *Vision-and-Language Pre-training (VLP) has improved performance on various joint vision-and-language downstream tasks. Current approaches to VLP heavily rely on image feature extraction processes, most of which involve region supervision (e.g., object detection) and the convolutional architecture (e.g., ResNet). 
Although disregarded in the literature, we find it problematic in terms of both (1) efficiency/speed, that simply extracting input features requires much more computation than the multimodal interaction steps; and (2) expressive power, as it is upper bounded to the expressive power of the visual embedder and its predefined visual vocabulary. In this paper, we present a minimal VLP model, Vision-and-Language Transformer (ViLT), monolithic in the sense that the processing of visual inputs is drastically simplified to just the same convolution-free manner that we process textual inputs. We show that ViLT is up to tens of times faster than previous VLP models, yet with competitive or better downstream task performance.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vilt_architecture.jpg" alt="drawing" width="600"/> <small> ViLT architecture. Taken from the <a href="https://huggingface.co/papers/2102.03334">original paper</a>. </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/dandelin/ViLT). ## Usage tips - The quickest way to get started with ViLT is by checking the [example notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ViLT) (which showcase both inference and fine-tuning on custom data). - ViLT is a model that takes both `pixel_values` and `input_ids` as input. One can use [`ViltProcessor`] to prepare data for the model. This processor wraps an image processor (for the image modality) and a tokenizer (for the language modality) into one. - ViLT is trained with images of various sizes: the authors resize the shorter edge of input images to 384 and limit the longer edge to under 640 while preserving the aspect ratio. To make batching of images possible, the authors use a `pixel_mask` that indicates which pixel values are real and which are padding. [`ViltProcessor`] automatically creates this for you. 
- The design of ViLT is very similar to that of a standard Vision Transformer (ViT). The only difference is that the model includes additional embedding layers for the language modality. - The PyTorch version of this model is only available in torch 1.10 and higher. ## ViltConfig [[autodoc]] ViltConfig ## ViltFeatureExtractor [[autodoc]] ViltFeatureExtractor - __call__ ## ViltImageProcessor [[autodoc]] ViltImageProcessor - preprocess ## ViltImageProcessorFast [[autodoc]] ViltImageProcessorFast - preprocess ## ViltProcessor [[autodoc]] ViltProcessor - __call__ ## ViltModel [[autodoc]] ViltModel - forward ## ViltForMaskedLM [[autodoc]] ViltForMaskedLM - forward ## ViltForQuestionAnswering [[autodoc]] ViltForQuestionAnswering - forward ## ViltForImagesAndTextClassification [[autodoc]] ViltForImagesAndTextClassification - forward ## ViltForImageAndTextRetrieval [[autodoc]] ViltForImageAndTextRetrieval - forward ## ViltForTokenClassification [[autodoc]] ViltForTokenClassification - forward
transformers/docs/source/en/model_doc/vilt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vilt.md", "repo_id": "transformers", "token_count": 1350 }
408
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> *This model was released on 2020-06-24 and added to Hugging Face Transformers on 2023-06-20.* # XLSR-Wav2Vec2 <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## Overview The XLSR-Wav2Vec2 model was proposed in [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://huggingface.co/papers/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli. The abstract from the paper is the following: *This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. 
On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.* The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/fairseq/models/wav2vec). Note: Meta (FAIR) released a new version of [Wav2Vec2-BERT 2.0](https://huggingface.co/docs/transformers/en/model_doc/wav2vec2-bert) - it's pretrained on 4.5M hours of audio. We especially recommend using it for fine-tuning tasks, e.g. as per [this guide](https://huggingface.co/blog/fine-tune-w2v2-bert). ## Usage tips - XLSR-Wav2Vec2 is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. - XLSR-Wav2Vec2 model was trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. <Tip> XLSR-Wav2Vec2's architecture is based on the Wav2Vec2 model, so one can refer to [Wav2Vec2's documentation page](wav2vec2). </Tip>
transformers/docs/source/en/model_doc/xlsr_wav2vec2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlsr_wav2vec2.md", "repo_id": "transformers", "token_count": 909 }
409
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT [[open-in-colab]] [PEFT](https://huggingface.co/docs/peft/index), a library of parameter-efficient fine-tuning methods, enables training and storing large models on consumer GPUs. These methods only fine-tune a small number of extra model parameters, also known as adapters, on top of the pretrained model. A significant amount of memory is saved because the GPU doesn't need to store the optimizer states and gradients for the pretrained base model. Adapters are very lightweight, making it convenient to share, store, and load them. This guide provides a short introduction to the PEFT library and how to use it for training with Transformers. For more details, refer to the PEFT [documentation](https://huggingface.co/docs/peft/index). Install PEFT with the command below. <hfoptions id="install"> <hfoption id="pip"> ```bash pip install -U peft ``` </hfoption> <hfoption id="source"> ```bash pip install git+https://github.com/huggingface/peft.git ``` </hfoption> </hfoptions> > [!TIP] > PEFT currently supports the LoRA, IA3, and AdaLoRA methods for Transformers. To use another PEFT method, such as prompt learning or prompt tuning, use the PEFT library directly. 
[Low-Rank Adaptation (LoRA)](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a very common PEFT method that decomposes the weight matrix into two smaller trainable matrices. Start by defining a [LoraConfig](https://huggingface.co/docs/peft/package_reference/lora#peft.LoraConfig) object with the parameters shown below. ```py from peft import LoraConfig, TaskType, get_peft_model from transformers import AutoModelForCausalLM # create LoRA configuration object lora_config = LoraConfig( task_type=TaskType.CAUSAL_LM, # type of task to train on inference_mode=False, # set to False for training r=8, # dimension of the smaller matrices lora_alpha=32, # scaling factor lora_dropout=0.1 # dropout of LoRA layers ) ``` Add [LoraConfig](https://huggingface.co/docs/peft/package_reference/lora#peft.LoraConfig) to the model with [`~integrations.PeftAdapterMixin.add_adapter`]. The model is now ready to be passed to [`Trainer`] for training. ```py model.add_adapter(lora_config, adapter_name="lora_1") trainer = Trainer(model=model, ...) trainer.train() ``` To add an additional trainable adapter on top of a model with an existing adapter attached, specify the modules you want to train in [modules_to_save()](https://huggingface.co/docs/peft/package_reference/lora#peft.LoraConfig.modules_to_save). For example, to train the `lm_head` module on top of a causal language model with a LoRA adapter attached, set `modules_to_save=["lm_head"]`. Add the adapter to the model as shown below, and then pass it to [`Trainer`]. ```py from transformers import AutoModelForCausalLM from peft import LoraConfig model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b") lora_config = LoraConfig( target_modules=["q_proj", "k_proj"], modules_to_save=["lm_head"], ) model.add_adapter(lora_config) trainer = Trainer(model=model, ...) trainer.train() ``` Save your adapter with [`~PreTrainedModel.save_pretrained`] to reuse it. 
## Load adapter To load an adapter with Transformers, the Hub repository or local directory must contain an `adapter_config.json` file and the adapter weights. Load the adapter with [`~PreTrainedModel.from_pretrained`] or with [`~integrations.PeftAdapterMixin.load_adapter`]. <hfoptions id="load"> <hfoption id="from_pretrained"> ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("klcsp/gemma7b-lora-alpaca-11-v1") ``` </hfoption> <hfoption id="load_adapter"> ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("google/gemma-7b") model.load_adapter("klcsp/gemma7b-lora-alpaca-11-v1") ``` </hfoption> </hfoptions> For very large models, it is helpful to load a quantized version of the model in 8 or 4-bit precision to save memory. Transformers supports quantization with its [bitsandbytes](https://huggingface.co/docs/bitsandbytes/index) integration. Specify in [`BitsAndBytesConfig`] whether you want to load a model in 8 or 4-bit precision. For multiple devices, add `device_map="auto"` to automatically distribute the model across your hardware. ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig model = AutoModelForCausalLM.from_pretrained( "klcsp/gemma7b-lora-alpaca-11-v1", quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", ) ``` ## Set adapter [`~integrations.PeftAdapterMixin.add_adapter`] adds a new adapter to a model. To add a second adapter, the new adapter must be the same type as the first adapter. Use the `adapter_name` parameter to assign a name to the adapter. ```py model.add_adapter(lora_config, adapter_name="lora_2") ``` Once added, use [`~integrations.PeftAdapterMixin.set_adapter`] to force a model to use the specified adapter and disable the other adapters. 
```py model.set_adapter("lora_2") ``` ## Enable and disable adapter [`~integrations.PeftAdapterMixin.enable_adapters`] is a broader function that enables *all* adapters attached to a model, and [`~integrations.PeftAdapterMixin.disable_adapters`] disables *all* attached adapters. ```py model.add_adapter(lora_1) model.add_adapter(lora_2) model.enable_adapters() # disable all adapters model.disable_adapters() ```
transformers/docs/source/en/peft.md/0
{ "file_path": "transformers/docs/source/en/peft.md", "repo_id": "transformers", "token_count": 1948 }
410
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Web server inference A web server is a system that waits for requests and serves them as they come in. This means you can use [`Pipeline`] as an inference engine on a web server, since you can use an iterator (similar to how you would [iterate over a dataset](./pipeline_tutorial#large-datasets)) to handle each incoming request. Designing a web server with [`Pipeline`] is unique though because they're fundamentally different. Web servers are multiplexed (multithreaded, async, etc.) to handle multiple requests concurrently. [`Pipeline`] and its underlying model on the other hand are not designed for parallelism because they take a lot of memory. It's best to give a [`Pipeline`] all the available resources when they're running or for a compute intensive job. This guide shows how to work around this difference by using a web server to handle the lighter load of receiving and sending requests, and having a single thread to handle the heavier load of running [`Pipeline`]. ## Create a server [Starlette](https://www.starlette.io/) is a lightweight framework for building web servers. You can use any other framework you'd like, but you may have to make some changes to the code below. 
Before you begin, make sure Starlette and [uvicorn](http://www.uvicorn.org/) are installed. ```py !pip install starlette uvicorn ``` Now you can create a simple web server in a `server.py` file. The key is to only load the model **once** to prevent unnecessary copies of it from consuming memory. Create a pipeline to fill in the masked token, `[MASK]`. ```py from starlette.applications import Starlette from starlette.responses import JSONResponse from starlette.routing import Route from transformers import pipeline import asyncio async def homepage(request): payload = await request.body() string = payload.decode("utf-8") response_q = asyncio.Queue() await request.app.model_queue.put((string, response_q)) output = await response_q.get() return JSONResponse(output) async def server_loop(q): pipe = pipeline(task="fill-mask",model="google-bert/bert-base-uncased") while True: (string, response_q) = await q.get() out = pipe(string) await response_q.put(out) app = Starlette( routes=[ Route("/", homepage, methods=["POST"]), ], ) @app.on_event("startup") async def startup_event(): q = asyncio.Queue() app.model_queue = q asyncio.create_task(server_loop(q)) ``` Start the server with the following command. ```bash uvicorn server:app ``` Query the server with a POST request. ```bash curl -X POST -d "Paris is the [MASK] of France." http://localhost:8000/ ``` This should return the output below. 
```bash [{'score': 0.9969332218170166, 'token': 3007, 'token_str': 'capital', 'sequence': 'paris is the capital of france.'}, {'score': 0.0005914849461987615, 'token': 2540, 'token_str': 'heart', 'sequence': 'paris is the heart of france.'}, {'score': 0.00043787318281829357, 'token': 2415, 'token_str': 'center', 'sequence': 'paris is the center of france.'}, {'score': 0.0003378340043127537, 'token': 2803, 'token_str': 'centre', 'sequence': 'paris is the centre of france.'}, {'score': 0.00026995912776328623, 'token': 2103, 'token_str': 'city', 'sequence': 'paris is the city of france.'}] ``` ## Queuing requests The server's queuing mechanism can be used for some interesting applications such as dynamic batching. Dynamic batching accumulates several requests first before processing them with [`Pipeline`]. The example below is written in pseudocode for readability rather than performance, in particular, you'll notice that: 1. There is no batch size limit. 2. The timeout is reset on every queue fetch, so you could end up waiting much longer than the `timeout` value before processing a request. This would also delay the first inference request by that amount of time. The web server always waits 1ms even if the queue is empty, which is inefficient, because that time can be used to start inference. It could make sense though if batching is essential to your use case. It would be better to have a single 1ms deadline, instead of resetting it on every fetch, as shown below. 
```py async def server_loop(q): pipe = pipeline(task="fill-mask", model="google-bert/bert-base-uncased") while True: (string, rq) = await q.get() strings = [] queues = [] strings.append(string) queues.append(rq) while True: try: (string, rq) = await asyncio.wait_for(q.get(), timeout=1) except asyncio.exceptions.TimeoutError: break strings.append(string) queues.append(rq) outs = pipe(strings, batch_size=len(strings)) for rq, out in zip(queues, outs): await rq.put(out) ``` ## Error checking There are many things that can go wrong in production. You could run out-of-memory, out of space, fail to load a model, have an incorrect model configuration, have an incorrect query, and so much more. Adding `try...except` statements is helpful for returning these errors to the user for debugging. Keep in mind this could be a security risk if you shouldn't be revealing certain information. ## Circuit breaking Try to return a 503 or 504 error when the server is overloaded instead of forcing a user to wait indefinitely. It is relatively simple to implement these error types since it's only a single queue. Take a look at the queue size to determine when to start returning errors before your server fails under load. ## Block the main thread PyTorch is not async aware, so computation will block the main thread from running. For this reason, it's better to run PyTorch on its own separate thread or process. When inference of a single request is especially long (more than 1s), it's even more important because it means every query during inference must wait 1s before even receiving an error. ## Dynamic batching Dynamic batching can be very effective when used in the correct setting, but it's not necessary when you're only passing 1 request at a time (see [batch inference](./pipeline_tutorial#batch-inference) for more details).
transformers/docs/source/en/pipeline_webserver.md/0
{ "file_path": "transformers/docs/source/en/pipeline_webserver.md", "repo_id": "transformers", "token_count": 2086 }
411
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # HIGGS [HIGGS](https://huggingface.co/papers/2411.17525) is a zero-shot quantization algorithm that combines Hadamard preprocessing with MSE-Optimal quantization grids to achieve lower quantization error and state-of-the-art performance. Runtime support for HIGGS is implemented through the [FLUTE](https://github.com/HanGuo97/flute) library. Only the 70B and 405B variants of Llama 3.1 and Llama 3.0, and the 8B and 27B variants of Gemma 2 are currently supported. HIGGS also doesn't support quantized training and backward passes in general at the moment. Run the command below to install FLUTE. <hfoptions id="install"> <hfoption id="CUDA 12.1"> ```bash pip install flute-kernel ``` </hfoption> <hfoption id="CUDA 11.8"> ```bash pip install flute-kernel -i https://flute-ai.github.io/whl/cu118 ``` </hfoption> </hfoptions> Create a [`HiggsConfig`] with the number of bits to quantize a model to. 
```python from transformers import AutoModelForCausalLM, AutoTokenizer, HiggsConfig model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-9b-it", quantization_config=HiggsConfig(bits=4), device_map="auto", ) ``` > [!TIP] > Find models pre-quantized with HIGGS in the official ISTA-DASLab [collection](https://huggingface.co/collections/ISTA-DASLab/higgs-675308e432fd56b7f6dab94e). ## torch.compile HIGGS is fully compatible with [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, HiggsConfig model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-9b-it", quantization_config=HiggsConfig(bits=4), device_map="auto", ) model = torch.compile(model) ``` Refer to the table below for a benchmark of forward passes/sec for Llama-3.1-8B-Instruct on a RTX4090. | Batch Size | BF16 (with `torch.compile`) | HIGGS 4bit (without `torch.compile`) | HIGGS 4bit (with `torch.compile`) | |------------|-----------------------------|----------------------------------|-----------------------------------| | 1 | 59 | 41 | 124 | | 4 | 57 | 42 | 123 | | 16 | 56 | 41 | 120 |
transformers/docs/source/en/quantization/higgs.md/0
{ "file_path": "transformers/docs/source/en/quantization/higgs.md", "repo_id": "transformers", "token_count": 1201 }
412
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Audio classification [[open-in-colab]] <Youtube id="KWwzcmG98Ds"/> Audio classification - just like with text - assigns a class label as output from the input data. The only difference is instead of text inputs, you have raw audio waveforms. Some practical applications of audio classification include identifying speaker intent, language classification, and even animal species by their sounds. This guide will show you how to: 1. Fine-tune [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) on the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset to classify speaker intent. 2. Use your fine-tuned model for inference. <Tip> To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/audio-classification) </Tip> Before you begin, make sure you have all the necessary libraries installed: ```bash pip install transformers datasets evaluate ``` We encourage you to login to your Hugging Face account so you can upload and share your model with the community. 
When prompted, enter your token to login: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load MInDS-14 dataset Start by loading the MInDS-14 dataset from the ๐Ÿค— Datasets library: ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` Split the dataset's `train` split into a smaller train and test set with the [`~datasets.Dataset.train_test_split`] method. This will give you a chance to experiment and make sure everything works before spending more time on the full dataset. ```py >>> minds = minds.train_test_split(test_size=0.2) ``` Then take a look at the dataset: ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 450 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 113 }) }) ``` While the dataset contains a lot of useful information, like `lang_id` and `english_transcription`, you will focus on the `audio` and `intent_class` in this guide. Remove the other columns with the [`~datasets.Dataset.remove_columns`] method: ```py >>> minds = minds.remove_columns(["path", "transcription", "english_transcription", "lang_id"]) ``` Here's an example: ```py >>> minds["train"][0] {'audio': {'array': array([ 0. , 0. , 0. , ..., -0.00048828, -0.00024414, -0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav', 'sampling_rate': 8000}, 'intent_class': 2} ``` There are two fields: - `audio`: a 1-dimensional `array` of the speech signal that must be called to load and resample the audio file. - `intent_class`: represents the class id of the speaker's intent. 
To make it easier for the model to get the label name from the label id, create a dictionary that maps the label name to an integer and vice versa: ```py >>> labels = minds["train"].features["intent_class"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... id2label[str(i)] = label ``` Now you can convert the label id to a label name: ```py >>> id2label[str(2)] 'app_error' ``` ## Preprocess The next step is to load a Wav2Vec2 feature extractor to process the audio signal: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` The MInDS-14 dataset has a sampling rate of 8kHz (you can find this information in its [dataset card](https://huggingface.co/datasets/PolyAI/minds14)), which means you'll need to resample the dataset to 16kHz to use the pretrained Wav2Vec2 model: ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([ 2.2098757e-05, 4.6582241e-05, -2.2803260e-05, ..., -2.8419291e-04, -2.3305941e-04, -1.1425107e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav', 'sampling_rate': 16000}, 'intent_class': 2} ``` Now create a preprocessing function that: 1. Calls the `audio` column to load, and if necessary, resample the audio file. 2. Checks if the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information in the Wav2Vec2 [model card](https://huggingface.co/facebook/wav2vec2-base). 3. Set a maximum input length to batch longer inputs without truncating them. ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... 
audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ... ) ... return inputs ``` To apply the preprocessing function over the entire dataset, use ๐Ÿค— Datasets [`~datasets.Dataset.map`] function. You can speed up `map` by setting `batched=True` to process multiple elements of the dataset at once. Remove unnecessary columns and rename `intent_class` to `label`, as required by the model: ```py >>> encoded_minds = minds.map(preprocess_function, remove_columns="audio", batched=True) >>> encoded_minds = encoded_minds.rename_column("intent_class", "label") ``` ## Evaluate Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the ๐Ÿค— [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the ๐Ÿค— Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the accuracy: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions = np.argmax(eval_pred.predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=eval_pred.label_ids) ``` Your `compute_metrics` function is ready to go now, and you'll return to it when you setup your training. ## Train <frameworkcontent> <pt> <Tip> If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)! </Tip> You're ready to start training your model now! 
Load Wav2Vec2 with [`AutoModelForAudioClassification`] along with the number of expected labels, and the label mappings: ```py >>> from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer >>> num_labels = len(id2label) >>> model = AutoModelForAudioClassification.from_pretrained( ... "facebook/wav2vec2-base", num_labels=num_labels, label2id=label2id, id2label=id2label ... ) ``` At this point, only three steps remain: 1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir`, which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the accuracy and save the training checkpoint. 2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function. 3. Call [`~Trainer.train`] to fine-tune your model. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_mind_model", ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=3e-5, ... per_device_train_batch_size=32, ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... processing_class=feature_extractor, ... compute_metrics=compute_metrics, ... 
) >>> trainer.train() ``` Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb). </Tip> ## Inference Great, now that you've fine-tuned a model, you can use it for inference! Load an audio file for inference. Remember to resample the sampling rate of the audio file to match the model's sampling rate, if necessary. ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` The simplest way to try out your fine-tuned model for inference is to use it in a [`pipeline`]. 
Instantiate a `pipeline` for audio classification with your model, and pass your audio file to it: ```py >>> from transformers import pipeline >>> classifier = pipeline("audio-classification", model="stevhliu/my_awesome_minds_model") >>> classifier(audio_file) [ {'score': 0.09766869246959686, 'label': 'cash_deposit'}, {'score': 0.07998877018690109, 'label': 'app_error'}, {'score': 0.0781070664525032, 'label': 'joint_account'}, {'score': 0.07667109370231628, 'label': 'pay_bill'}, {'score': 0.0755252093076706, 'label': 'balance'} ] ``` You can also manually replicate the results of the `pipeline` if you'd like: <frameworkcontent> <pt> Load a feature extractor to preprocess the audio file and return the `input` as PyTorch tensors: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("stevhliu/my_awesome_minds_model") >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` Pass your inputs to the model and return the logits: ```py >>> from transformers import AutoModelForAudioClassification >>> model = AutoModelForAudioClassification.from_pretrained("stevhliu/my_awesome_minds_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` Get the class with the highest probability, and use the model's `id2label` mapping to convert it to a label: ```py >>> import torch >>> predicted_class_ids = torch.argmax(logits).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label 'cash_deposit' ``` </pt> </frameworkcontent>
transformers/docs/source/en/tasks/audio_classification.md/0
{ "file_path": "transformers/docs/source/en/tasks/audio_classification.md", "repo_id": "transformers", "token_count": 4036 }
413
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Prompt engineering [[open-in-colab]] Prompt engineering, or prompting, uses natural language to improve large language model (LLM) performance on a variety of tasks. A prompt can steer the model towards generating a desired output. In many cases, you don't even need a [fine-tuned](#finetuning) model for a task. You just need a good prompt. Try prompting a LLM to classify some text. When you create a prompt, it's important to provide very specific instructions about the task and what the result should look like. ```py from transformers import pipeline import torch pipeline = pipeline(task="text-generation", model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Classify the text into neutral, negative or positive. Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. Sentiment: """ outputs = pipeline(prompt, max_new_tokens=10) for output in outputs: print(f"Result: {output['generated_text']}") Result: Classify the text into neutral, negative or positive. Text: This movie is definitely one of my favorite movies of its kind. 
The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. Sentiment: Positive ``` The challenge lies in designing prompts that produce the results you're expecting because language is so incredibly nuanced and expressive. This guide covers prompt engineering best practices, techniques, and examples for how to solve language and reasoning tasks. ## Best practices 1. Try to pick the latest models for the best performance. Keep in mind that LLMs can come in two variants, [base](https://hf.co/mistralai/Mistral-7B-v0.1) and [instruction-tuned](https://hf.co/mistralai/Mistral-7B-Instruct-v0.1) (or chat). Base models are excellent at completing text given an initial prompt, but they're not as good at following instructions. Instruction-tuned models are specifically trained versions of the base models on instructional or conversational data. This makes instruction-tuned models a better fit for prompting. > [!WARNING] > Modern LLMs are typically decoder-only models, but there are some encoder-decoder LLMs like [Flan-T5](../model_doc/flan-t5) or [BART](../model_doc/bart) that may be used for prompting. For encoder-decoder models, make sure you set the pipeline task identifier to `text2text-generation` instead of `text-generation`. 2. Start with a short and simple prompt, and iterate on it to get better results. 3. Put instructions at the beginning or end of a prompt. For longer prompts, models may apply optimizations to prevent attention from scaling quadratically, which places more emphasis at the beginning and end of a prompt. 4. Clearly separate instructions from the text of interest. 5. Be specific and descriptive about the task and the desired output, including for example, its format, length, style, and language. Avoid ambiguous descriptions and instructions. 6. Instructions should focus on "what to do" rather than "what not to do". 7. 
Lead the model to generate the correct output by writing the first word or even the first sentence. 8. Try other techniques like [few-shot](#few-shot) and [chain-of-thought](#chain-of-thought) to improve results. 9. Test your prompts with different models to assess their robustness. 10. Version and track your prompt performance. ## Techniques Crafting a good prompt alone, also known as zero-shot prompting, may not be enough to get the results you want. You may need to try a few prompting techniques to get the best performance. This section covers a few prompting techniques. ### Few-shot prompting Few-shot prompting improves accuracy and performance by including specific examples of what a model should generate given an input. The explicit examples give the model a better understanding of the task and the output format you're looking for. Try experimenting with different numbers of examples (2, 4, 8, etc.) to see how it affects performance. The example below provides the model with 1 example (1-shot) of the output format (a date in MM/DD/YYYY format) it should return. ```python from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961. Date: 04/12/1961 Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. Date:""" outputs = pipeline(prompt, max_new_tokens=12, do_sample=True, top_k=10) for output in outputs: print(f"Result: {output['generated_text']}") # Result: Text: The first human went into space and orbited the Earth on April 12, 1961. # Date: 04/12/1961 # Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. 
# Date: 09/28/1960 ``` The downside of few-shot prompting is that you need to create lengthier prompts which increases computation and latency. There is also a limit to prompt lengths. Finally, a model can learn unintended patterns from your examples, and it may not work well on complex reasoning tasks. To improve few-shot prompting for modern instruction-tuned LLMs, use a model's specific [chat template](../conversations). These models are trained on datasets with turn-based conversations between a "user" and "assistant". Structuring your prompt to align with this can improve performance. Structure your prompt as a turn-based conversation and use the [`apply_chat_template`] method to tokenize and format it. ```python from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") messages = [ {"role": "user", "content": "Text: The first human went into space and orbited the Earth on April 12, 1961."}, {"role": "assistant", "content": "Date: 04/12/1961"}, {"role": "user", "content": "Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon."} ] prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) outputs = pipeline(prompt, max_new_tokens=12, do_sample=True, top_k=10) for output in outputs: print(f"Result: {output['generated_text']}") ``` While the basic few-shot prompting approach embedded examples within a single text string, the chat template format offers the following benefits. - The model may have a potentially improved understanding because it can better recognize the pattern and the expected roles of user input and assistant output. - The model may more consistently output the desired output format because it is structured like its input during training. 
Always consult a specific instruction-tuned model's documentation to learn more about the format of their chat template so that you can structure your few-shot prompts accordingly. ### Chain-of-thought Chain-of-thought (CoT) is effective at generating more coherent and well-reasoned outputs by providing a series of prompts that help a model "think" more thoroughly about a topic. The example below provides the model with several prompts to work through intermediate reasoning steps. ```py from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Let's go through this step-by-step: 1. You start with 15 muffins. 2. You eat 2 muffins, leaving you with 13 muffins. 3. You give 5 muffins to your neighbor, leaving you with 8 muffins. 4. Your partner buys 6 more muffins, bringing the total number of muffins to 14. 5. Your partner eats 2 muffins, leaving you with 12 muffins. If you eat 6 muffins, how many are left?""" outputs = pipeline(prompt, max_new_tokens=20, do_sample=True, top_k=10) for output in outputs: print(f"Result: {output['generated_text']}") Result: Let's go through this step-by-step: 1. You start with 15 muffins. 2. You eat 2 muffins, leaving you with 13 muffins. 3. You give 5 muffins to your neighbor, leaving you with 8 muffins. 4. Your partner buys 6 more muffins, bringing the total number of muffins to 14. 5. Your partner eats 2 muffins, leaving you with 12 muffins. If you eat 6 muffins, how many are left? Answer: 6 ``` Like [few-shot](#few-shot) prompting, the downside of CoT is that it requires more effort to design a series of prompts that help the model reason through a complex task and prompt length increases latency. ## Fine-tuning While prompting is a powerful way to work with LLMs, there are scenarios where a fine-tuned model or even fine-tuning a model works better. Here are some examples scenarios where a fine-tuned model makes sense. 
- Your domain is extremely different from what a LLM was pretrained on, and extensive prompting didn't produce the results you want. - Your model needs to work well in a low-resource language. - Your model needs to be trained on sensitive data that have strict regulatory requirements. - You're using a small model due to cost, privacy, infrastructure, or other constraints. In all of these scenarios, ensure that you have a large enough domain-specific dataset to train your model with, have enough time and resources, and the cost of fine-tuning is worth it. Otherwise, you may be better off trying to optimize your prompt. ## Examples The examples below demonstrate prompting a LLM for different tasks. <hfoptions id="tasks"> <hfoption id="named entity recognition"> ```py from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Return a list of named entities in the text. Text: The company was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf in New York City, originally as a company that developed a chatbot app targeted at teenagers. Named entities: """ outputs = pipeline(prompt, max_new_tokens=50, return_full_text=False) for output in outputs: print(f"Result: {output['generated_text']}") Result: [Clément Delangue, Julien Chaumond, Thomas Wolf, company, New York City, chatbot app, teenagers] ``` </hfoption> <hfoption id="translation"> ```py from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Translate the English text to French. Text: Sometimes, I've believed as many as six impossible things before breakfast. 
Translation: """ outputs = pipeline(prompt, max_new_tokens=20, do_sample=True, top_k=10, return_full_text=False) for output in outputs: print(f"Result: {output['generated_text']}") Result: ร€ l'occasion, j'ai croyu plus de six choses impossibles ``` </hfoption> <hfoption id="summarization"> ```py from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Permaculture is a design process mimicking the diversity, functionality and resilience of natural ecosystems. The principles and practices are drawn from traditional ecological knowledge of indigenous cultures combined with modern scientific understanding and technological innovations. Permaculture design provides a framework helping individuals and communities develop innovative, creative and effective strategies for meeting basic needs while preparing for and mitigating the projected impacts of climate change. Write a summary of the above text. Summary: """ outputs = pipeline(prompt, max_new_tokens=30, do_sample=True, top_k=10, return_full_text=False) for output in outputs: print(f"Result: {output['generated_text']}") Result: Permaculture is the design process that involves mimicking natural ecosystems to provide sustainable solutions to basic needs. It is a holistic approach that comb ``` </hfoption> <hfoption id="question answering"> ```py from transformers import pipeline import torch pipeline = pipeline(model="mistralai/Mistral-7B-Instruct-v0.1", dtype=torch.bfloat16, device_map="auto") prompt = """Answer the question using the context below. Context: Gazpacho is a cold soup and drink made of raw, blended vegetables. Most gazpacho includes stale bread, tomato, cucumbers, onion, bell peppers, garlic, olive oil, wine vinegar, water, and salt. Northern recipes often include cumin and/or pimentรณn (smoked sweet paprika). 
Traditionally, gazpacho was made by pounding the vegetables in a mortar with a pestle; this more laborious method is still sometimes used as it helps keep the gazpacho cool and avoids the foam and silky consistency of smoothie versions made in blenders or food processors. Question: What modern tool is used to make gazpacho? Answer: """ outputs = pipeline(prompt, max_new_tokens=10, do_sample=True, top_k=10, return_full_text=False) for output in outputs: print(f"Result: {output['generated_text']}") Result: A blender or food processor is the modern tool ``` </hfoption> </hfoptions>
transformers/docs/source/en/tasks/prompting.md/0
{ "file_path": "transformers/docs/source/en/tasks/prompting.md", "repo_id": "transformers", "token_count": 3739 }
414
### `tiny-agents` CLI and MCP Tools To showcase the use of MCP tools, let's see how to integrate the `transformers serve` server with the [`tiny-agents`](https://huggingface.co/blog/python-tiny-agents) CLI. > [!TIP] > Many Hugging Face Spaces can be used as MCP servers, as in this example. You can find all compatible Spaces [here](https://huggingface.co/spaces?filter=mcp-server). The first step to use MCP tools is to let the model know which tools are available. As an example, let's consider a `tiny-agents` configuration file with a reference to an [image generation MCP server](https://evalstate-flux1-schnell.hf.space/). ```json { "model": "Menlo/Jan-nano", "endpointUrl": "http://localhost:8000", "servers": [ { "type": "sse", "url": "https://evalstate-flux1-schnell.hf.space/gradio_api/mcp/sse" } ] } ``` You can then launch your `tiny-agents` chat interface with the following command. ```bash tiny-agents run path/to/your/config.json ``` If you have `transformers serve` running in the background, you're ready to use MCP tools from a local model! For instance, here's the example of a chat session with `tiny-agents`: ```bash Agent loaded with 1 tools: โ€ข flux1_schnell_infer ยป Generate an image of a cat on the moon <Tool req_0_tool_call>flux1_schnell_infer {"prompt": "a cat on the moon", "seed": 42, "randomize_seed": true, "width": 1024, "height": 1024, "num_inference_steps": 4} Tool req_0_tool_call [Binary Content: Image image/webp, 57732 bytes] The task is complete and the content accessible to the User Image URL: https://evalstate-flux1-schnell.hf.space/gradio_api/file=/tmp/gradio/3dbddc0e53b5a865ed56a4e3dbdd30f3f61cf3b8aabf1b456f43e5241bd968b8/image.webp 380576952 I have generated an image of a cat on the moon using the Flux 1 Schnell Image Generator. The image is 1024x1024 pixels and was created with 4 inference steps. Let me know if you would like to make any changes or need further assistance! ```
transformers/docs/source/en/tiny_agents.md/0
{ "file_path": "transformers/docs/source/en/tiny_agents.md", "repo_id": "transformers", "token_count": 699 }
415
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Plantillas para Modelos de Chat ## Introducciรณn Un caso de uso cada vez mรกs comรบn para LLMs es **el chat**. En un contexto de chat, en lugar de continuar una รบnica cadena de texto (como es el caso con un modelo de lenguaje estรกndar), el modelo continรบa una conversaciรณn que consta de uno o mรกs **mensajes**, cada uno de los cuales incluye un **rol**, como "usuario" o "asistente", asรญ como el texto del mensaje. Al igual que con la tokenizaciรณn, diferentes modelos esperan formatos de entrada muy diferentes para el chat. Esta es la razรณn por la que agregamos las plantillas de chat como una caracterรญstica. Las plantillas de chat son parte del tokenizador. Especifican cรณmo convertir conversaciones, representadas como listas de mensajes, en una รบnica cadena tokenizable en el formato que el modelo espera. Vamos a hacer esto con un ejemplo concreto utilizando el modelo `BlenderBot`. BlenderBot tiene una plantilla predeterminada extremadamente simple, que principalmente solo agrega espacios en blanco entre rondas de diรกlogo: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ ... 
{"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) " Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>" ``` Observa cรณmo todo el chat se condensa en una sola cadena. Si usamos `tokenize=True`, que es la configuraciรณn predeterminada, esa cadena tambiรฉn serรก tokenizada para nosotros. Sin embargo, para ver una plantilla mรกs compleja en acciรณn, usemos el modelo `mistralai/Mistral-7B-Instruct-v0.1` ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]" ``` Ten en cuenta que esta vez, el tokenizador ha aรฑadido los tokens de control [INST] y [/INST] para indicar el inicio y el final de los mensajes de usuario (ยกpero no de los mensajes del asistente!). Mistral-instruct fue entrenado con estos tokens, pero BlenderBot no lo fue. ## ยฟCรณmo uso las plantillas de chat? Como puedes ver en el ejemplo anterior, las plantillas de chat son fรกciles de usar. Simplemente construye una lista de mensajes, con claves de `rol` y `contenido`, y luego pรกsala al mรฉtodo [`~PreTrainedTokenizer.apply_chat_template`]. Una vez que hagas eso, ยกobtendrรกs una salida lista para usar! 
Al utilizar plantillas de chat como entrada para la generaciรณn de modelos, tambiรฉn es una buena idea usar `add_generation_prompt=True` para agregar una [indicaciรณn de generaciรณn](#ยฟQuรฉ-son-los-"generation-prompts"?). Aquรญ tienes un ejemplo de cรณmo preparar la entrada para `model.generate()` utilizando el modelo de asistente `Zephyr`: ```python from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceH4/zephyr-7b-beta" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` Esto generarรก una cadena en el formato de entrada que Zephyr espera. ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` Ahora que nuestra entrada estรก formateada correctamente para Zephyr, podemos usar el modelo para generar una respuesta a la pregunta del usuario: ```python outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` Esto producirรก: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. 
But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` ยกArr, al final resultรณ ser fรกcil! ## ยฟExiste un pipeline automatizado para chats? ยกSรญ, lo hay! Nuestros pipelines de generaciรณn de texto admiten entradas de chat, lo que facilita el uso de los modelos de chat. En el pasado, solรญamos utilizar una clase dedicada "ConversationalPipeline", pero ahora ha quedado obsoleta y su funcionalidad se ha fusionado en [`TextGenerationPipeline`]. Este pipeline estรก diseรฑado para facilitar el uso de modelos de chat. Intentemos el ejemplo de `Zephyr` de nuevo, pero esta vez utilizando el pipeline: ```python from transformers import pipeline pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta") messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # Print the assistant's response ``` ```text {'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."} ``` La canalizaciรณn se encargarรก de todos los detalles de la tokenizaciรณn y de llamar a `apply_chat_template` por ti. Una vez que el modelo tenga una plantilla de chat, ยกtodo lo que necesitas hacer es inicializar el pipeline y pasarle la lista de mensajes! ## ยฟQuรฉ son los "generation prompts"? Puede que hayas notado que el mรฉtodo `apply_chat_template` tiene un argumento `add_generation_prompt`. 
Este argumento indica a la plantilla que agregue tokens que indiquen el inicio de una respuesta del bot. Por ejemplo, considera el siguiente chat: ```python messages = [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"}, {"role": "user", "content": "Can I ask a question?"} ] ``` Asรญ es cรณmo se verรก esto sin un "generation prompt", usando la plantilla ChatML que vimos en el ejemplo de Zephyr: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> """ ``` Y asรญ es como se ve **con** un "generation prompt": ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant """ ``` Ten en cuenta que esta vez, hemos agregado los tokens que indican el inicio de una respuesta del bot. Esto asegura que cuando el modelo genere texto, escribirรก una respuesta del bot en lugar de hacer algo inesperado, como continuar el mensaje del usuario. Recuerda, los modelos de chat siguen siendo solo modelos de lenguaje: estรกn entrenados para continuar texto, ยกy el chat es solo un tipo especial de texto para ellos! Necesitas guiarlos con los tokens de control apropiados para que sepan lo que se supone que deben estar haciendo. No todos los modelos requieren "generation prompts". Algunos modelos, como BlenderBot y LLaMA, no tienen ningรบn token especial antes de las respuestas del bot. En estos casos, el argumento `add_generation_prompt` no tendrรก ningรบn efecto. El efecto exacto que tiene `add_generation_prompt` dependerรก de la plantilla que se estรฉ utilizando. ## ยฟPuedo usar plantillas de chat en el entrenamiento? ยกSรญ! 
Recomendamos que apliques la plantilla de chat como un paso de preprocesamiento para tu conjunto de datos. Despuรฉs de esto, simplemente puedes continuar como cualquier otra tarea de entrenamiento de modelos de lenguaje. Durante el entrenamiento, generalmente deberรญas establecer `add_generation_prompt=False`, porque los tokens aรฑadidos para solicitar una respuesta del asistente no serรกn รบtiles durante el entrenamiento. Veamos un ejemplo: ```python from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` Y obtenemos: ```text <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` Desde aquรญ, simplemente continรบa el entrenamiento como lo harรญas con una tarea estรกndar de modelado de lenguaje, utilizando la columna `formatted_chat`. ## Avanzado: ยฟCรณmo funcionan las plantillas de chat? La plantilla de chat para un modelo se almacena en el atributo `tokenizer.chat_template`. Si no se establece ninguna plantilla de chat, se utiliza en su lugar la plantilla predeterminada para esa clase de modelo. 
Echemos un vistazo a la plantilla para `BlenderBot`: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer.chat_template "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}" ``` ยกEs un poco intimidante! Vamos a agregar algunas lรญneas nuevas y sangria para que sea mรกs legible. Ten en cuenta que la primera lรญnea nueva despuรฉs de cada bloque, asรญ como cualquier espacio en blanco anterior a un bloque, se ignoran de forma predeterminada, utilizando las banderas `trim_blocks` y `lstrip_blocks` de Jinja. Sin embargo, ยกten cuidado! Aunque el espacio en blanco inicial en cada lรญnea se elimina, los espacios entre bloques en la misma lรญnea no. ยกTe recomendamos encarecidamente que verifiques que tu plantilla no estรฉ imprimiendo espacios adicionales donde no deberรญa estarlo! ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ ' ' }} {% endif %} {{ message['content'] }} {% if not loop.last %} {{ ' ' }} {% endif %} {% endfor %} {{ eos_token }} ``` Si nunca has visto uno de estos antes, esto es una [plantilla de Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/). Jinja es un lenguaje de plantillas que te permite escribir cรณdigo simple que genera texto. En muchos aspectos, el cรณdigo y la sintaxis se asemejan a Python. En Python puro, esta plantilla se verรญa algo asรญ: ```python for idx, message in enumerate(messages): if message['role'] == 'user': print(' ') print(message['content']) if not idx == len(messages) - 1: # Check for the last message in the conversation print(' ') print(eos_token) ``` Efectivamente, la plantilla hace tres cosas: 1. Para cada mensaje, si el mensaje es un mensaje de usuario, aรฑade un espacio en blanco antes de รฉl, de lo contrario no imprime nada. 2. Aรฑade el contenido del mensaje. 3. 
Si el mensaje no es el รบltimo mensaje, aรฑade dos espacios despuรฉs de รฉl. Despuรฉs del รบltimo mensaje, imprime el token EOS. Esta es una plantilla bastante simple: no aรฑade ningรบn token de control y no admite mensajes "del sistema", que son una forma comรบn de dar al modelo directivas sobre cรณmo debe comportarse en la conversaciรณn posterior. ยกPero Jinja te brinda mucha flexibilidad para hacer esas cosas! Veamos una plantilla de Jinja que pueda formatear las entradas de manera similar a la forma en que LLaMA las formatea (nota que la plantilla real de LLaMA incluye el manejo de mensajes del sistema predeterminados y el manejo de mensajes del sistema ligeramente diferentes en general; ยกno uses esta en tu cรณdigo real!) ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'] + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ ' ' + message['content'] + ' ' + eos_token }} {% endif %} {% endfor %} ``` Si observas esto por un momento, podrรกs ver lo que esta plantilla estรก haciendo: aรฑade tokens especรญficos basados en el "rol" de cada mensaje, que representa quiรฉn lo enviรณ. Los mensajes de usuario, asistente y sistema son claramente distinguibles para el modelo debido a los tokens en los que estรกn envueltos. ## Avanzado: Aรฑadiendo y editando plantillas de chat ### ยฟCรณmo creo una plantilla de chat? Simple, solo escribe una plantilla de Jinja y establece `tokenizer.chat_template`. ยกPuede resultarte mรกs fรกcil comenzar con una plantilla existente de otro modelo y simplemente editarla segรบn tus necesidades! 
Por ejemplo, podrรญamos tomar la plantilla de LLaMA de arriba y aรฑadir "[ASST]" y "[/ASST]" a los mensajes del asistente: ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }} {% endif %} {% endfor %} ``` Ahora, simplemente establece el atributo `tokenizer.chat_template`. ยกLa prรณxima vez que uses [`~PreTrainedTokenizer.apply_chat_template`], se utilizarรก tu nueva plantilla! Este atributo se guardarรก en el archivo tokenizer_config.json, por lo que puedes usar [`~utils.PushToHubMixin.push_to_hub`] para cargar tu nueva plantilla en el Hub y asegurarte de que todos estรฉn utilizando la plantilla correcta para tu modelo. ```python template = tokenizer.chat_template template = template.replace("SYS", "SYSTEM") # Change the system token tokenizer.chat_template = template # Set the new template tokenizer.push_to_hub("model_name") # Upload your new template to the Hub! ``` El mรฉtodo [`~PreTrainedTokenizer.apply_chat_template`], que utiliza tu plantilla de chat, es llamado por la clase [`TextGenerationPipeline`], asรญ que una vez que configures la plantilla de chat correcta, tu modelo se volverรก automรกticamente compatible con [`TextGenerationPipeline`]. <Tip> Si estรกs ajustando finamente un modelo para chat, ademรกs de establecer una plantilla de chat, probablemente deberรญas agregar cualquier nuevo token de control de chat como los tokens especiales en el tokenizador. Los tokens especiales nunca se dividen, asegurando que tus tokens de control siempre se manejen como tokens รบnicos en lugar de ser tokenizados en piezas. Tambiรฉn deberรญas establecer el atributo `eos_token` del tokenizador con el token que marca el final de las generaciones del asistente en tu plantilla. 
Esto asegurarรก que las herramientas de generaciรณn de texto puedan determinar correctamente cuรกndo detener la generaciรณn de texto. </Tip> ### ยฟQuรฉ plantilla deberรญa usar? Cuando establezcas la plantilla para un modelo que ya ha sido entrenado para chat, debes asegurarte de que la plantilla coincida exactamente con el formato de mensajes que el modelo vio durante el entrenamiento, o de lo contrario es probable que experimentes degradaciรณn del rendimiento. Esto es cierto incluso si estรกs entrenando aรบn mรกs el modelo; probablemente obtendrรกs el mejor rendimiento si mantienes constantes los tokens de chat. Esto es muy anรกlogo a la tokenizaciรณn: generalmente obtienes el mejor rendimiento para la inferencia o el ajuste fino cuando coincides precisamente con la tokenizaciรณn utilizada durante el entrenamiento. Si estรกs entrenando un modelo desde cero o ajustando finamente un modelo de lenguaje base para chat, por otro lado, ยกtienes mucha libertad para elegir una plantilla apropiada! Los LLM son lo suficientemente inteligentes como para aprender a manejar muchos formatos de entrada diferentes. Nuestra plantilla predeterminada para modelos que no tienen una plantilla especรญfica de clase sigue el formato ChatML, y esta es una buena elecciรณn flexible para muchos casos de uso. Se ve asรญ: ``` {% for message in messages %} {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}} {% endfor %} ``` Si te gusta esta plantilla, aquรญ estรก en forma de una sola lรญnea, lista para copiar en tu cรณdigo. La versiรณn de una sola lรญnea tambiรฉn incluye un prรกctico soporte para [prompts de generaciรณn](#ยฟQuรฉ-son-los-"generation-prompts"?), ยกpero ten en cuenta que no aรฑade tokens de BOS o EOS! Si tu modelo espera esos tokens, no se agregarรกn automรกticamente por `apply_chat_template`, en otras palabras, el texto serรก tokenizado con `add_special_tokens=False`. 
Esto es para evitar posibles conflictos entre la plantilla y la lรณgica de `add_special_tokens`. ยกSi tu modelo espera tokens especiales, asegรบrate de aรฑadirlos a la plantilla! ```python tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" ``` Esta plantilla envuelve cada mensaje en tokens `<|im_start|>` y `<|im_end|>`, y simplemente escribe el rol como una cadena, lo que permite flexibilidad en los roles con los que entrenas. La salida se ve asรญ: ```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user How are you?<|im_end|> <|im_start|>assistant I'm doing great!<|im_end|> ``` Los roles "usuario", "sistema" y "asistente" son los estรกndar para chat, y recomendamos usarlos cuando tenga sentido, particularmente si deseas que tu modelo funcione bien con [`TextGenerationPipeline`]. Sin embargo, no estรกs limitado a estos roles: la plantilla es extremadamente flexible y cualquier cadena puede ser un rol. ### ยกQuiero aรฑadir algunas plantillas de chat! ยฟCรณmo debo empezar? Si tienes algรบn modelo de chat, debes establecer su atributo `tokenizer.chat_template` y probarlo usando [`~PreTrainedTokenizer.apply_chat_template`], luego subir el tokenizador actualizado al Hub. Esto se aplica incluso si no eres el propietario del modelo: si estรกs usando un modelo con una plantilla de chat vacรญa o que todavรญa estรก utilizando la plantilla predeterminada de clase, por favor abre una solicitud de extracciรณn [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) al repositorio del modelo para que este atributo se pueda establecer correctamente. 
Una vez que se establece el atributo, ยกeso es todo, has terminado! `tokenizer.apply_chat_template` ahora funcionarรก correctamente para ese modelo, ยกlo que significa que tambiรฉn es compatible automรกticamente en lugares como `TextGenerationPipeline`! Al asegurarnos de que los modelos tengan este atributo, podemos garantizar que toda la comunidad pueda utilizar todo el poder de los modelos de cรณdigo abierto. Los desajustes de formato han estado acechando el campo y daรฑando silenciosamente el rendimiento durante demasiado tiempo: ยกes hora de ponerles fin! ## Avanzado: Consejos para escribir plantillas Si no estรกs familiarizado con Jinja, generalmente encontramos que la forma mรกs fรกcil de escribir una plantilla de chat es primero escribir un script de Python corto que formatee los mensajes como desees, y luego convertir ese script en una plantilla. Recuerda que el manejador de plantillas recibirรก el historial de conversaciรณn como una variable llamada mensajes. Cada mensaje es un diccionario con dos claves, `role` y `content`. Podrรกs acceder a los `mensajes` en tu plantilla tal como lo harรญas en Python, lo que significa que puedes recorrerlo con `{% for message in messages %}` o acceder a mensajes individuales con, por ejemplo, `{{ messages[0] }}`. Tambiรฉn puedes usar los siguientes consejos para convertir tu cรณdigo a Jinja: ### Bucles For Los bucles For en Jinja se ven asรญ: ``` {% for message in messages %} {{ message['content'] }} {% endfor %} ``` Ten en cuenta que todo lo que estรฉ dentro del {{bloque de expresiรณn}} se imprimirรก en la salida. Puedes usar operadores como `+` para combinar cadenas dentro de bloques de expresiรณn. 
### Declaraciones if Las declaraciones if en Jinja se ven asรญ: ``` {% if message['role'] == 'user' %} {{ message['content'] }} {% endif %} ``` Observa cรณmo donde Python utiliza espacios en blanco para marcar el inicio y el final de los bloques `for` e `if`, Jinja requiere que los termines explรญcitamente con `{% endfor %}` y `{% endif %}`. ### Variables especiales Dentro de tu plantilla, tendrรกs acceso a la lista de `mensajes`, pero tambiรฉn puedes acceder a varias otras variables especiales. Estas incluyen tokens especiales como `bos_token` y `eos_token`, asรญ como la variable `add_generation_prompt` que discutimos anteriormente. Tambiรฉn puedes usar la variable `loop` para acceder a informaciรณn sobre la iteraciรณn actual del bucle, por ejemplo, usando `{% if loop.last %}` para verificar si el mensaje actual es el รบltimo mensaje en la conversaciรณn. Aquรญ tienes un ejemplo que combina estas ideas para agregar un prompt de generaciรณn al final de la conversaciรณn si add_generation_prompt es `True`: ``` {% if loop.last and add_generation_prompt %} {{ bos_token + 'Assistant:\n' }} {% endif %} ``` ### Notas sobre los espacios en blanco Hemos intentado que Jinja ignore los espacios en blanco fuera de las {{expresiones}} tanto como sea posible. Sin embargo, ten en cuenta que Jinja es un motor de plantillas de propรณsito general y puede tratar el espacio en blanco entre bloques en la misma lรญnea como significativo e imprimirlo en la salida. ยกTe recomendamos **encarecidamente** que verifiques que tu plantilla no estรฉ imprimiendo espacios adicionales donde no deberรญa antes de subirla!
transformers/docs/source/es/chat_templating.md/0
{ "file_path": "transformers/docs/source/es/chat_templating.md", "repo_id": "transformers", "token_count": 8513 }
416
<!--Copyright 2020 de The HuggingFace Team. Todos los derechos reservados Con licencia bajo la Licencia Apache, Versiรณn 2.0 (la "Licencia"); No puedes usar este archivo excepto de conformidad con la Licencia. Puedes obtener una copia de la Licencia en http://www.apache.org/licenses/LICENSE-2.0 A menos que sea requerido por la ley aplicable o acordado por escrito, el software distribuido bajo la Licencia es distribuido sobre una BASE "AS IS", SIN GARANTIAS O CONDICIONES DE NINGรšN TIPO. Ver la Licencia para el idioma especรญfico que rige los permisos y limitaciones bajo la Licencia. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Filosofรญa ๐Ÿค— Transformers es una biblioteca construida para: - Los investigadores y educadores de NLP que busquen usar/estudiar/extender modelos transformers a gran escala - Profesionales que quieren optimizar esos modelos y/o ponerlos en producciรณn - Ingenieros que solo quieren descargar un modelo preentrenado y usarlo para resolver una tarea NLP dada. La biblioteca fue diseรฑada con dos fuertes objetivos en mente: - Que sea tan fรกcil y rรกpida de utilizar como sea posible: - Hemos limitado enormemente el nรบmero de abstracciones que el usuario tiene que aprender. De hecho, no hay casi abstracciones, solo tres clases estรกndar necesarias para usar cada modelo: [configuration](main_classes/configuration), [models](main_classes/model) y [tokenizer](main_classes/tokenizer). 
- Todas estas clases pueden ser inicializadas de forma simple y unificada a partir de ejemplos pre-entrenados mediante el uso de un mรฉtodo `from_pretrained()` comรบn de solicitud que se encargarรก de descargar (si es necesario), almacenar y cargar la solicitud de clase relacionada y datos asociados (configurations' hyper-parameters, tokenizers' vocabulary, and models' weights) a partir de un control pre-entrenado proporcionado en [Hugging Face Hub](https://huggingface.co/models) o de tu propio control guardado. - Por encima de esas tres clases estรกndar, la biblioteca proporciona dos APIs: [`pipeline`] para usar rรกpidamente un modelo (junto a su configuracion y tokenizer asociados) sobre una tarea dada, y [`Trainer`]/`Keras.fit` para entrenar u optimizar de forma rรกpida un modelo dado. - Como consecuencia, esta biblioteca NO es una caja de herramientas modular de bloques individuales para redes neuronales. Si quieres extender/construir sobre la biblioteca, usa simplemente los mรณdulos regulares de Python/PyTorch/TensorFlow/Keras y emplea las clases estรกndar de la biblioteca como punto de partida para reutilizar funcionalidades tales como abrir/guardar modelo. - Proporciona modelos modernos con rendimientos lo mรกs parecido posible a los modelos originales: - Proporcionamos al menos un ejemplo para cada arquitectura que reproduce un resultado proporcionado por los autores de dicha arquitectura. - El cรณdigo normalmente es parecido al cรณdigo base original, lo cual significa que algรบn cรณdigo Pytorch puede no ser tan *pytorchic* como podrรญa ser por haber sido convertido a cรณdigo TensorFlow, y viceversa. Unos cuantos objetivos adicionales: - Exponer las caracterรญsticas internas de los modelos de la forma mรกs coherente posible: - Damos acceso, mediante una sola API, a todos los estados ocultos y pesos de atenciรณn. - Tokenizer y el modelo de API base estรกn estandarizados para cambiar fรกcilmente entre modelos. 
- Incorporar una selecciรณn subjetiva de herramientas de gran potencial para la optimizaciรณn/investigaciรณn de estos modelos: - Una forma sencilla/coherente de aรฑadir nuevos tokens al vocabulario e incrustraciones (embeddings, en inglรฉs) para optimizaciรณn. - Formas sencillas de camuflar y reducir "transformer heads". - Cambiar fรกcilmente entre PyTorch y TensorFlow 2.0, permitiendo el entrenamiento usando un marco y la inferencia usando otro. ## Conceptos principales La biblioteca estรก construida alrededor de tres tipos de clases para cada modelo: - **Model classes** como [`BertModel`], que consisten en mรกs de 30 modelos PyTorch ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)) o modelos Keras ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) que funcionan con pesos pre-entrenados proporcionados en la biblioteca. - **Configuration classes** como [`BertConfig`], que almacena todos los parรกmetros necesarios para construir un modelo. No siempre tienes que generarla tu. En particular, si estas usando un modelo pre-entrenado sin ninguna modificaciรณn, la creaciรณn del modelo se encargarรก automรกticamente de generar la configuraciรณn (que es parte del modelo). - **Tokenizer classes** como [`BertTokenizer`], que almacena el vocabulario para cada modelo y proporciona mรฉtodos para codificar/decodificar strings en una lista de รญndices de "token embeddings" para ser empleados en un modelo. Todas estas clases pueden ser generadas a partir de ejemplos pre-entrenados, y guardados localmente usando dos mรฉtodos: - `from_pretrained()` permite generar un modelo/configuraciรณn/tokenizer a partir de una versiรณn pre-entrenada proporcionada ya sea por la propia biblioteca (los modelos compatibles se pueden encontrar en [Model Hub](https://huggingface.co/models)) o guardados localmente (o en un servidor) por el usuario. 
- `save_pretrained()` permite guardar un modelo/configuraciรณn/tokenizer localmente, de forma que puede ser empleado de nuevo usando `from_pretrained()`.
transformers/docs/source/es/philosophy.md/0
{ "file_path": "transformers/docs/source/es/philosophy.md", "repo_id": "transformers", "token_count": 1964 }
417
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Respuesta a preguntas <Youtube id="ajPx5LwJD-I"/> La respuesta a preguntas devuelve una respuesta a partir de una pregunta dada. Existen dos formas comunes de responder preguntas: - Extractiva: extraer la respuesta a partir del contexto dado. - Abstractiva: generar una respuesta que responda correctamente la pregunta a partir del contexto dado. Esta guรญa te mostrarรก como hacer fine-tuning de [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) en el dataset [SQuAD](https://huggingface.co/datasets/squad) para responder preguntas de forma extractiva. <Tip> Revisa la [pรกgina de la tarea](https://huggingface.co/tasks/question-answering) de responder preguntas para tener mรกs informaciรณn sobre otras formas de responder preguntas y los modelos, datasets y mรฉtricas asociadas. </Tip> ## Carga el dataset SQuAD Carga el dataset SQuAD con la biblioteca ๐Ÿค— Datasets: ```py >>> from datasets import load_dataset >>> squad = load_dataset("squad") ``` Ahora, รฉchale un vistazo a una muestra: ```py >>> squad["train"][0] {'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character. 
Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame' } ``` El campo `answers` es un diccionario que contiene la posiciรณn inicial de la respuesta y el `texto` de la respuesta. ## Preprocesamiento <Youtube id="qgaM0weJHpA"/> Carga el tokenizer de DistilBERT para procesar los campos `question` (pregunta) y `context` (contexto): ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Hay algunos pasos de preprocesamiento especรญficos para la tarea de respuesta a preguntas que debes tener en cuenta: 1. Algunos ejemplos en un dataset pueden tener un contexto que supera la longitud mรกxima de entrada de un modelo. Trunca solamente el contexto asignรกndole el valor `"only_second"` al parรกmetro `truncation`. 2. A continuaciรณn, mapea las posiciones de inicio y fin de la respuesta al contexto original asignรกndole el valor `True` al parรกmetro `return_offsets_mapping`. 3. Una vez tengas el mapeo, puedes encontrar los tokens de inicio y fin de la respuesta. 
Usa el mรฉtodo [`sequence_ids`](https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids) para encontrar quรฉ parte de la lista de tokens desplazados corresponde a la pregunta y cuรกl corresponde al contexto. A continuaciรณn puedes ver como se crea una funciรณn para truncar y mapear los tokens de inicio y fin de la respuesta al `context`: ```py >>> def preprocess_function(examples): ... questions = [q.strip() for q in examples["question"]] ... inputs = tokenizer( ... questions, ... examples["context"], ... max_length=384, ... truncation="only_second", ... return_offsets_mapping=True, ... padding="max_length", ... ) ... offset_mapping = inputs.pop("offset_mapping") ... answers = examples["answers"] ... start_positions = [] ... end_positions = [] ... for i, offset in enumerate(offset_mapping): ... answer = answers[i] ... start_char = answer["answer_start"][0] ... end_char = answer["answer_start"][0] + len(answer["text"][0]) ... sequence_ids = inputs.sequence_ids(i) ... # Encuentra el inicio y el fin del contexto ... idx = 0 ... while sequence_ids[idx] != 1: ... idx += 1 ... context_start = idx ... while sequence_ids[idx] == 1: ... idx += 1 ... context_end = idx - 1 ... # Si la respuesta entera no estรก dentro del contexto, etiquรฉtala como (0, 0) ... if offset[context_start][0] > end_char or offset[context_end][1] < start_char: ... start_positions.append(0) ... end_positions.append(0) ... else: ... # De lo contrario, esta es la posiciรณn de los tokens de inicio y fin ... idx = context_start ... while idx <= context_end and offset[idx][0] <= start_char: ... idx += 1 ... start_positions.append(idx - 1) ... idx = context_end ... while idx >= context_start and offset[idx][1] >= end_char: ... idx -= 1 ... end_positions.append(idx + 1) ... inputs["start_positions"] = start_positions ... inputs["end_positions"] = end_positions ... 
return inputs ``` Usa la funciรณn [`~datasets.Dataset.map`] de ๐Ÿค— Datasets para aplicarle la funciรณn de preprocesamiento al dataset entero. Puedes acelerar la funciรณn `map` haciendo `batched=True` para procesar varios elementos del dataset a la vez. Quita las columnas que no necesites: ```py >>> tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names) ``` Usa el [`DefaultDataCollator`] para crear un lote de ejemplos. A diferencia de los otros collators de datos en ๐Ÿค— Transformers, el `DefaultDataCollator` no aplica ningรบn procesamiento adicional (como el rellenado). <frameworkcontent> <pt> ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` </pt> <tf> ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") ``` </tf> </frameworkcontent> ## Entrenamiento <frameworkcontent> <pt> Carga el modelo DistilBERT con [`AutoModelForQuestionAnswering`]: ```py >>> from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer >>> model = AutoModelForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> Para familiarizarte con el fine-tuning con [`Trainer`], ยกmira el tutorial bรกsico [aquรญ](../training#finetune-with-trainer)! </Tip> En este punto, solo quedan tres pasos: 1. Definir tus hiperparรกmetros de entrenamiento en [`TrainingArguments`]. 2. Pasarle los argumentos del entrenamiento al [`Trainer`] junto con el modelo, el dataset, el tokenizer y el collator de datos. 3. Invocar el mรฉtodo [`~Trainer.train`] para realizar el fine-tuning del modelo. ```py >>> training_args = TrainingArguments( ... output_dir="./results", ... eval_strategy="epoch", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... weight_decay=0.01, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... 
train_dataset=tokenized_squad["train"], ... eval_dataset=tokenized_squad["validation"], ... processing_class=tokenizer, ... data_collator=data_collator, ... ) >>> trainer.train() ``` </pt> <tf> Para realizar el fine-tuning de un modelo en TensorFlow, primero convierte tus datasets al formato `tf.data.Dataset` con el mรฉtodo [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_squad["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_validation_set = model.prepare_tf_dataset( ... tokenized_squad["validation"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` <Tip> Para familiarizarte con el fine-tuning con Keras, ยกmira el tutorial bรกsico [aquรญ](training#finetune-with-keras)! </Tip> Prepara una funciรณn de optimizaciรณn, un programa para la tasa de aprendizaje y algunos hiperparรกmetros de entrenamiento: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_epochs = 2 >>> total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs >>> optimizer, schedule = create_optimizer( ... init_lr=2e-5, ... num_warmup_steps=0, ... num_train_steps=total_train_steps, ... 
) ``` Carga el modelo DistilBERT con [`TFAutoModelForQuestionAnswering`]: ```py >>> from transformers import TFAutoModelForQuestionAnswering >>> model = TFAutoModelForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` Configura el modelo para entrenarlo con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` Invoca el método [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo: ```py >>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3) ``` </tf> </frameworkcontent> <Tip> Para un ejemplo con mayor profundidad de cómo hacer fine-tuning a un modelo para responder preguntas, échale un vistazo al [cuaderno de PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb) o al [cuaderno de TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb) correspondiente. </Tip>
transformers/docs/source/es/tasks/question_answering.md/0
{ "file_path": "transformers/docs/source/es/tasks/question_answering.md", "repo_id": "transformers", "token_count": 3912 }
418
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Condividere modelli personalizzati La libreria ๐Ÿค— Transformers รจ studiata per essere facilmente estendibile. Il codice di ogni modello รจ interamente situato in una sottocartella del repository senza alcuna astrazione, perciรฒ puoi facilmente copiare il file di un modello e modificarlo in base ai tuoi bisogni. Se stai scrivendo un nuovo modello, potrebbe essere piรน semplice iniziare da zero. In questo tutorial, ti mostreremo come scrivere un modello personalizzato e la sua configurazione in modo che possa essere utilizzato allโ€™interno di Transformers, e come condividerlo con la community (assieme al relativo codice) cosรฌ che tutte le persone possano usarlo, anche se non presente nella libreria ๐Ÿค— Transformers. Illustriamo tutto questo su un modello ResNet, avvolgendo la classe ResNet della [libreria timm](https://github.com/rwightman/pytorch-image-models) in un [`PreTrainedModel`]. ## Scrivere una configurazione personalizzata Prima di iniziare a lavorare al modello, scriviamone la configurazione. La configurazione di un modello รจ un oggetto che contiene tutte le informazioni necessarie per la build del modello. 
Come vedremo nella prossima sezione, il modello puรฒ soltanto essere inizializzato tramite `config`, per cui dovremo rendere tale oggetto piรน completo possibile. Nel nostro esempio, prenderemo un paio di argomenti della classe ResNet che potremmo voler modificare. Configurazioni differenti ci daranno quindi i differenti possibili tipi di ResNet. Salveremo poi questi argomenti, dopo averne controllato la validitร . ```python from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: list[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.") self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs) ``` Le tre cose piรน importanti da ricordare quando scrivi le tue configurazioni sono le seguenti: - Devi ereditare da `Pretrainedconfig`, - Il metodo `__init__` del tuo `Pretrainedconfig` deve accettare i kwargs, - I `kwargs` devono essere passati alla superclass `__init__` Lโ€™ereditร  รจ importante per assicurarsi di ottenere tutte le funzionalitร  della libreria ๐Ÿค— transformers, mentre gli altri due vincoli derivano dal fatto che un `Pretrainedconfig` ha piรน campi di quelli che stai settando. 
Quando ricarichi una config da un metodo `from_pretrained`, questi campi devono essere accettati dalla tua config e poi inviati alla superclasse. Definire un `model_type` per la tua configurazione (qua `model_type = "resnet"`) non è obbligatorio, a meno che tu non voglia registrare il modello con le classi Auto (vedi l'ultima sezione). Una volta completato, puoi facilmente creare e salvare la tua configurazione come faresti con ogni altra configurazione di modelli della libreria. Ecco come possiamo creare la config di un resnet50d e salvarlo: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet") ``` Questo salverà un file chiamato `config.json` all'interno della cartella `custom-resnet`. Potrai poi ricaricare la tua config con il metodo `from_pretrained`. ```py resnet50d_config = ResnetConfig.from_pretrained("custom-resnet") ``` Puoi anche usare qualunque altro metodo della classe [`PretrainedConfig`], come [`~PretrainedConfig.push_to_hub`] per caricare direttamente la tua configurazione nell'hub. ## Scrivere un modello personalizzato Ora che abbiamo la nostra configurazione ResNet, possiamo continuare a scrivere il modello. In realtà, ne scriveremo due: uno che estrae le features nascoste da una batch di immagini (come [`BertModel`]) e uno che è utilizzabile per la classificazione di immagini (come [`BertForSequenceClassification`]). Come abbiamo menzionato in precedenza, scriveremo soltanto un wrapper del modello, per mantenerlo semplice ai fini di questo esempio. L'unica cosa che dobbiamo fare prima di scrivere questa classe è una mappatura fra i tipi di blocco e le vere classi dei blocchi. Successivamente il modello è definito tramite la configurazione, passando tutto quanto alla classe `ResNet`.
```py from transformers import PreTrainedModel from timm.models.resnet import BasicBlock, Bottleneck, ResNet from .configuration_resnet import ResnetConfig BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck} class ResnetModel(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor): return self.model.forward_features(tensor) ``` Per il modello che classificherร  le immagini, cambiamo soltanto il metodo forward: ```py import torch class ResnetModelForImageClassification(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor, labels=None): logits = self.model(tensor) if labels is not None: loss = torch.nn.functional.cross_entropy(logits, labels) return {"loss": loss, "logits": logits} return {"logits": logits} ``` Nota come, in entrambi i casi, ereditiamo da `PreTrainedModel` e chiamiamo l'inizializzazione della superclasse con il metodo `config` (un po' come quando scrivi un normale `torch.nn.Module`). La riga che imposta la `config_class` non รจ obbligatoria, a meno che tu non voglia registrare il modello con le classi Auto (vedi l'ultima sezione). <Tip> Se il tuo modello รจ molto simile a un modello all'interno della libreria, puoi ri-usare la stessa configurazione di quel modello. 
</Tip> Puoi fare in modo che il tuo modello restituisca in output qualunque cosa tu voglia, ma far restituire un dizionario come abbiamo fatto per `ResnetModelForImageClassification`, con la funzione di perdita inclusa quando vengono passate le labels, renderร  il tuo modello direttamente utilizzabile all'interno della classe [`Trainer`]. Utilizzare altri formati di output va bene se hai in progetto di utilizzare un tuo loop di allenamento, o se utilizzerai un'altra libreria per l'addestramento. Ora che abbiamo la classe del nostro modello, creiamone uno: ```py resnet50d = ResnetModelForImageClassification(resnet50d_config) ``` Ribadiamo, puoi usare qualunque metodo dei [`PreTrainedModel`], come [`~PreTrainedModel.save_pretrained`] o [`~PreTrainedModel.push_to_hub`]. Utilizzeremo quest'ultimo nella prossima sezione, e vedremo come caricare i pesi del modello assieme al codice del modello stesso. Ma prima, carichiamo alcuni pesi pre-allenati all'interno del nostro modello. Nel tuo caso specifico, probabilmente allenerai il tuo modello sui tuoi dati. Per velocizzare in questo tutorial, utilizzeremo la versione pre-allenata del resnet50d. Dato che il nostro modello รจ soltanto un wrapper attorno a quel modello, sarร  facile trasferirne i pesi: ```py import timm pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` Vediamo adesso come assicurarci che quando facciamo [`~PreTrainedModel.save_pretrained`] o [`~PreTrainedModel.push_to_hub`], il codice del modello venga salvato. ## Inviare il codice all'Hub <Tip warning={true}> Questa API รจ sperimentale e potrebbe avere alcuni cambiamenti nei prossimi rilasci. </Tip> Innanzitutto, assicurati che il tuo modello sia completamente definito in un file `.py`. Puรฒ sfruttare import relativi ad altri file, purchรจ questi siano nella stessa directory (non supportiamo ancora sotto-moduli per questa funzionalitร ). 
Per questo esempio, definiremo un file `modeling_resnet.py` e un file `configuration_resnet.py` in una cartella dell'attuale working directory chiamata `resnet_model`. Il file configuration contiene il codice per `ResnetConfig` e il file modeling contiene il codice di `ResnetModel` e `ResnetModelForImageClassification`. ``` . โ””โ”€โ”€ resnet_model โ”œโ”€โ”€ __init__.py โ”œโ”€โ”€ configuration_resnet.py โ””โ”€โ”€ modeling_resnet.py ``` Il file `__init__.py` puรฒ essere vuoto, serve solo perchรจ Python capisca che `resnet_model` puรฒ essere utilizzato come un modulo. <Tip warning={true}> Se stai copiando i file relativi alla modellazione della libreria, dovrai sostituire tutti gli import relativi in cima al file con import del pacchetto `transformers`. </Tip> Nota che puoi ri-utilizzare (o usare come sottoclassi) un modello/configurazione esistente. Per condividere il tuo modello con la community, segui questi passi: prima importa il modello ResNet e la sua configurazione dai nuovi file creati: ```py from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification ``` Dopodichรจ dovrai dire alla libreria che vuoi copiare i file con il codice di quegli oggetti quando utilizzi il metodo `save_pretrained` e registrarli in modo corretto con una Auto classe (specialmente per i modelli). Utilizza semplicemente: ```py ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification") ``` Nota che non c'รจ bisogno di specificare una Auto classe per la configurazione (c'รจ solo una Auto classe per le configurazioni, [`AutoConfig`], ma รจ diversa per i modelli). Il tuo modello personalizato potrebbe essere utilizzato per diverse tasks, per cui devi specificare quale delle classi Auto รจ quella corretta per il tuo modello. 
Successivamente, creiamo i modelli e la config come abbiamo fatto in precedenza: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` Adesso, per inviare il modello all'Hub, assicurati di aver effettuato l'accesso. Lancia dal tuo terminale: ```bash hf auth login ``` O da un notebook: ```py from huggingface_hub import notebook_login notebook_login() ``` Potrai poi inviare il tutto sul tuo profilo (o di un'organizzazione di cui fai parte) in questo modo: ```py resnet50d.push_to_hub("custom-resnet50d") ``` Oltre ai pesi del modello e alla configurazione in formato json, questo ha anche copiato i file `.py` modeling e configuration all'interno della cartella `custom-resnet50d` e ha caricato i risultati sull'Hub. Puoi controllare i risultati in questa [model repo](https://huggingface.co/sgugger/custom-resnet50d). Puoi controllare il [tutorial di condivisione](model_sharing) per più informazioni sul metodo con cui inviare all'Hub. ## Usare un modello con codice personalizzato Puoi usare ogni configurazione, modello o tokenizer con file di codice personalizzati nella sua repository con le classi Auto e il metodo `from_pretrained`. Tutti i files e il codice caricati sull'Hub sono scansionati alla ricerca di malware (fai riferimento alla documentazione [Hub security](https://huggingface.co/docs/hub/security#malware-scanning) per più informazioni), ma dovresti comunque assicurarti dell'affidabilità del codice e dell'autore per evitare di eseguire codice dannoso sulla tua macchina.
Imposta `trust_remote_code=True` per usare un modello con codice personalizzato: ```py from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True) ``` Inoltre, raccomandiamo fortemente di passare un hash del commit come `revision` per assicurarti che le autrici o gli autori del modello non abbiano modificato il codice con alcune nuove righe dannose (a meno che non ti fidi completamente della fonte): ```py commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash ) ``` Nota che quando cerchi la storia dei commit della repo del modello sull'Hub, c'รจ un bottone con cui facilmente copiare il commit hash di ciascun commit. ## Registrare un modello con codice personalizzato nelle classi Auto Se stai scrivendo una libreria che estende ๐Ÿค— Transformers, potresti voler estendere le classi Auto per includere il tuo modello. Questo รจ diverso dall'inviare codice nell'Hub: gli utenti dovranno importare la tua libreria per ottenere il modello personalizzato (anzichรจ scaricare automaticamente il modello dall'Hub). 
Finchรจ il tuo file di configurazione ha un attributo `model_type` diverso dai model types esistenti, e finchรจ le tue classi modello hanno i corretti attributi `config_class`, potrai semplicemente aggiungerli alle classi Auto come segue: ```py from transformers import AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register("resnet", ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) ``` Nota che il primo argomento utilizzato quando registri la configurazione di un modello personalizzato con [`AutoConfig`] deve corrispondere al `model_type` della tua configurazione personalizzata, ed il primo argomento utilizzato quando registri i tuoi modelli personalizzati in una qualunque classe Auto del modello deve corrispondere alla `config_class` di quei modelli.
transformers/docs/source/it/custom_models.md/0
{ "file_path": "transformers/docs/source/it/custom_models.md", "repo_id": "transformers", "token_count": 5883 }
419
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipeline per l'inferenza La [`pipeline`] rende semplice usare qualsiasi modello dal [Model Hub](https://huggingface.co/models) per fare inferenza su diversi compiti come generazione del testo, segmentazione di immagini e classificazione di audio. Anche se non hai esperienza con una modalitร  specifica o non comprendi bene il codice che alimenta i modelli, รจ comunque possibile utilizzarli con l'opzione [`pipeline`]! Questa esercitazione ti insegnerร  a: * Usare una [`pipeline`] per fare inferenza. * Usare uno specifico tokenizer o modello. * Usare una [`pipeline`] per compiti che riguardano audio e video. <Tip> Dai un'occhiata alla documentazione di [`pipeline`] per una lista completa dei compiti supportati. </Tip> ## Utilizzo della Pipeline Nonostante ogni compito abbia una [`pipeline`] associata, รจ piรน semplice utilizzare l'astrazione generica della [`pipeline`] che contiene tutte quelle specifiche per ogni mansione. La [`pipeline`] carica automaticamente un modello predefinito e un tokenizer in grado di fare inferenza per il tuo compito. 1. Inizia creando una [`pipeline`] e specificando il compito su cui fare inferenza: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation") ``` 2. 
Inserisci il testo in input nella [`pipeline`]: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" ... ) # doctest: +SKIP [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain'}] ``` Se hai piรน di un input, inseriscilo in una lista: ```py >>> generator( ... [ ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", ... ] ... ) # doctest: +SKIP ``` Qualsiasi parametro addizionale per il tuo compito puรฒ essere incluso nella [`pipeline`]. La mansione `text-generation` ha un metodo [`~generation.GenerationMixin.generate`] con diversi parametri per controllare l'output. Ad esempio, se desideri generare piรน di un output, utilizza il parametro `num_return_sequences`: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", ... num_return_sequences=2, ... ) # doctest: +SKIP ``` ### Scegliere modello e tokenizer La [`pipeline`] accetta qualsiasi modello dal [Model Hub](https://huggingface.co/models). Ci sono tag nel Model Hub che consentono di filtrare i modelli per attivitร . Una volta che avrai scelto il modello appropriato, caricalo usando la corrispondente classe `AutoModelFor` e [`AutoTokenizer`]. 
Ad esempio, carica la classe [`AutoModelForCausalLM`] per un compito di causal language modeling: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` Crea una [`pipeline`] per il tuo compito, specificando il modello e il tokenizer che hai caricato: ```py >>> from transformers import pipeline >>> generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer) ``` Inserisci il testo di input nella [`pipeline`] per generare del testo: ```py >>> generator( ... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" ... ) # doctest: +SKIP [{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm'}] ``` ## Audio pipeline La flessibilitร  della [`pipeline`] fa si che possa essere estesa ad attivitร  sugli audio. Per esempio, classifichiamo le emozioni in questo clip audio: ```py >>> from datasets import load_dataset >>> import torch >>> torch.manual_seed(42) # doctest: +IGNORE_RESULT >>> ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> audio_file = ds[0]["audio"]["path"] ``` Trova un modello per la [classificazione audio](https://huggingface.co/models?pipeline_tag=audio-classification) sul Model Hub per eseguire un compito di riconoscimento automatico delle emozioni e caricalo nella [`pipeline`]: ```py >>> from transformers import pipeline >>> audio_classifier = pipeline( ... task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... 
) ``` Inserisci il file audio nella [`pipeline`]: ```py >>> preds = audio_classifier(audio_file) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.1315, 'label': 'calm'}, {'score': 0.1307, 'label': 'neutral'}, {'score': 0.1274, 'label': 'sad'}, {'score': 0.1261, 'label': 'fearful'}, {'score': 0.1242, 'label': 'happy'}] ``` ## Vision pipeline Infine, usare la [`pipeline`] per le attivitร  sulle immagini รจ praticamente la stessa cosa. Specifica la tua attivitร  e inserisci l'immagine nel classificatore. L'immagine puรฒ essere sia un link che un percorso sul tuo pc in locale. Per esempio, quale specie di gatto รจ raffigurata qui sotto? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(task="image-classification") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ```
transformers/docs/source/it/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/it/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 2398 }
420
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Create a custom architecture [`AutoClass`](model_doc/auto)ใฏใ€ใƒขใƒ‡ใƒซใฎใ‚ขใƒผใ‚ญใƒ†ใ‚ฏใƒใƒฃใ‚’่‡ชๅ‹•็š„ใซๆŽจ่ซ–ใ—ใ€ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใฎ่จญๅฎšใจ้‡ใฟใ‚’ใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ใ—ใพใ™ใ€‚ไธ€่ˆฌ็š„ใซใฏใ€ใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใซไพๅญ˜ใ—ใชใ„ใ‚ณใƒผใƒ‰ใ‚’็”Ÿๆˆใ™ใ‚‹ใŸใ‚ใซ`AutoClass`ใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใ‚’ใŠๅ‹งใ‚ใ—ใพใ™ใ€‚ใŸใ ใ—ใ€็‰นๅฎšใฎใƒขใƒ‡ใƒซใƒ‘ใƒฉใƒกใƒผใ‚ฟใซๅฏพใ™ใ‚‹ๅˆถๅพกใ‚’ใ‚ˆใ‚Š่ฉณ็ดฐใซ่กŒใ„ใŸใ„ใƒฆใƒผใ‚ถใƒผใฏใ€ใ„ใใคใ‹ใฎๅŸบๆœฌใ‚ฏใƒฉใ‚นใ‹ใ‚‰ใ‚ซใ‚นใ‚ฟใƒ ๐Ÿค— Transformersใƒขใƒ‡ใƒซใ‚’ไฝœๆˆใงใใพใ™ใ€‚ใ“ใ‚Œใฏใ€๐Ÿค— Transformersใƒขใƒ‡ใƒซใ‚’็ ”็ฉถใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ€ใพใŸใฏๅฎŸ้จ“ใ™ใ‚‹่ˆˆๅ‘ณใŒใ‚ใ‚‹ใƒฆใƒผใ‚ถใƒผใซ็‰นใซๅฝน็ซ‹ใคใ‹ใ‚‚ใ—ใ‚Œใพใ›ใ‚“ใ€‚ใ“ใฎใ‚ฌใ‚คใƒ‰ใงใฏใ€`AutoClass`ใ‚’ไฝฟ็”จใ—ใชใ„ใ‚ซใ‚นใ‚ฟใƒ ใƒขใƒ‡ใƒซใฎไฝœๆˆใซใคใ„ใฆ่ฉณใ—ใ่ชฌๆ˜Žใ—ใพใ™ใ€‚ๆฌกใฎๆ–นๆณ•ใ‚’ๅญฆใณใพใ™๏ผš - ใƒขใƒ‡ใƒซใฎ่จญๅฎšใ‚’ใƒญใƒผใƒ‰ใŠใ‚ˆใณใ‚ซใ‚นใ‚ฟใƒžใ‚คใ‚บใ™ใ‚‹ใ€‚ - ใƒขใƒ‡ใƒซใ‚ขใƒผใ‚ญใƒ†ใ‚ฏใƒใƒฃใ‚’ไฝœๆˆใ™ใ‚‹ใ€‚ - ใƒ†ใ‚ญใ‚นใƒˆ็”จใฎ้…ใ„ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใจ้ซ˜้€Ÿใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ไฝœๆˆใ™ใ‚‹ใ€‚ - ใƒ“ใ‚ธใƒงใƒณใ‚ฟใ‚นใ‚ฏ็”จใฎ็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ไฝœๆˆใ™ใ‚‹ใ€‚ - ใ‚ชใƒผใƒ‡ใ‚ฃใ‚ชใ‚ฟใ‚นใ‚ฏ็”จใฎ็‰นๅพดๆŠฝๅ‡บๅ™จใ‚’ไฝœๆˆใ™ใ‚‹ใ€‚ - 
ใƒžใƒซใƒใƒขใƒผใƒ€ใƒซใ‚ฟใ‚นใ‚ฏ็”จใฎใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ไฝœๆˆใ™ใ‚‹ใ€‚ ## Configuration [่จญๅฎš](main_classes/configuration)ใฏใ€ใƒขใƒ‡ใƒซใฎ็‰นๅฎšใฎๅฑžๆ€งใ‚’ๆŒ‡ใ—ใพใ™ใ€‚ๅ„ใƒขใƒ‡ใƒซใฎ่จญๅฎšใซใฏ็•ฐใชใ‚‹ๅฑžๆ€งใŒใ‚ใ‚Šใพใ™ใ€‚ใŸใจใˆใฐใ€ใ™ในใฆใฎNLPใƒขใƒ‡ใƒซใซใฏใ€`hidden_size`ใ€`num_attention_heads`ใ€`num_hidden_layers`ใ€ใŠใ‚ˆใณ`vocab_size`ๅฑžๆ€งใŒๅ…ฑ้€šใ—ใฆใ‚ใ‚Šใพใ™ใ€‚ใ“ใ‚Œใ‚‰ใฎๅฑžๆ€งใฏใ€ใƒขใƒ‡ใƒซใ‚’ๆง‹็ฏ‰ใ™ใ‚‹ใŸใ‚ใฎๆณจๆ„ใƒ˜ใƒƒใƒ‰ใฎๆ•ฐใ‚„้š ใ‚Œๅฑคใฎๆ•ฐใ‚’ๆŒ‡ๅฎšใ—ใพใ™ใ€‚ [DistilBERT](model_doc/distilbert)ใ‚’ใ‚ˆใ‚Š่ฉณใ—ใ่ชฟในใ‚‹ใŸใ‚ใซใ€[`DistilBertConfig`]ใซใ‚ขใ‚ฏใ‚ปใ‚นใ—ใฆใใฎๅฑžๆ€งใ‚’่ชฟในใฆใฟใพใ—ใ‚‡ใ†๏ผš ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`]ใฏใ€ๅŸบๆœฌใฎ[`DistilBertModel`]ใ‚’ๆง‹็ฏ‰ใ™ใ‚‹ใŸใ‚ใซไฝฟ็”จใ•ใ‚Œใ‚‹ใ™ในใฆใฎใƒ‡ใƒ•ใ‚ฉใƒซใƒˆๅฑžๆ€งใ‚’่กจ็คบใ—ใพใ™ใ€‚ ใ™ในใฆใฎๅฑžๆ€งใฏใ‚ซใ‚นใ‚ฟใƒžใ‚คใ‚บๅฏ่ƒฝใงใ€ๅฎŸ้จ“ใฎใŸใ‚ใฎใ‚นใƒšใƒผใ‚นใ‚’ๆไพ›ใ—ใพใ™ใ€‚ไพ‹ใˆใฐใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใƒขใƒ‡ใƒซใ‚’ใ‚ซใ‚นใ‚ฟใƒžใ‚คใ‚บใ—ใฆไปฅไธ‹ใฎใ‚ˆใ†ใชใ“ใจใŒใงใใพใ™๏ผš - `activation`ใƒ‘ใƒฉใƒกใƒผใ‚ฟใง็•ฐใชใ‚‹ๆดปๆ€งๅŒ–้–ขๆ•ฐใ‚’่ฉฆใ™ใ€‚ - `attention_dropout`ใƒ‘ใƒฉใƒกใƒผใ‚ฟใงๆณจๆ„็ขบ็އใฎ้ซ˜ใ„ใƒ‰ใƒญใƒƒใƒ—ใ‚ขใ‚ฆใƒˆ็އใ‚’ไฝฟ็”จใ™ใ‚‹ใ€‚ ```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", 
"n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใฎๅฑžๆ€งใฏใ€[`~PretrainedConfig.from_pretrained`] ้–ขๆ•ฐใงๅค‰ๆ›ดใงใใพใ™๏ผš ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` Once you are satisfied with your model configuration, you can save it with [`PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory. ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` ่จญๅฎšใƒ•ใ‚กใ‚คใƒซใ‚’ๅ†ๅˆฉ็”จใ™ใ‚‹ใซใฏใ€[`~PretrainedConfig.from_pretrained`]ใ‚’ไฝฟ็”จใ—ใฆใใ‚Œใ‚’ใƒญใƒผใƒ‰ใ—ใพใ™๏ผš ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") ``` <Tip> ใ‚ซใ‚นใ‚ฟใƒ ๆง‹ๆˆใƒ•ใ‚กใ‚คใƒซใ‚’่พžๆ›ธใจใ—ใฆไฟๅญ˜ใ™ใ‚‹ใ“ใจใ‚‚ใ€ใ‚ซใ‚นใ‚ฟใƒ ๆง‹ๆˆๅฑžๆ€งใจใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎๆง‹ๆˆๅฑžๆ€งใฎ้•ใ„ใ ใ‘ใ‚’ไฟๅญ˜ใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™๏ผ่ฉณ็ดฐใซใคใ„ใฆใฏ[configuration](main_classes/configuration)ใฎใƒ‰ใ‚ญใƒฅใƒกใƒณใƒ†ใƒผใ‚ทใƒงใƒณใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚ </Tip> ## Model ๆฌกใฎใ‚นใƒ†ใƒƒใƒ—ใฏใ€[ใƒขใƒ‡ใƒซ](main_classes/models)ใ‚’ไฝœๆˆใ™ใ‚‹ใ“ใจใงใ™ใ€‚ใƒขใƒ‡ใƒซ๏ผˆใ‚ขใƒผใ‚ญใƒ†ใ‚ฏใƒใƒฃใจใ‚‚็ทฉใ่จ€ใ‚ใ‚Œใ‚‹ใ“ใจใŒใ‚ใ‚Šใพใ™๏ผ‰ใฏใ€ๅ„ใƒฌใ‚คใƒคใƒผใŒไฝ•ใ‚’ใ—ใฆใ„ใ‚‹ใ‹ใ€ใฉใฎๆ“ไฝœใŒ่กŒใ‚ใ‚Œใฆใ„ใ‚‹ใ‹ใ‚’ๅฎš็พฉใ—ใพใ™ใ€‚ๆง‹ๆˆใ‹ใ‚‰ใฎ `num_hidden_layers` ใฎใ‚ˆใ†ใชๅฑžๆ€งใฏใ‚ขใƒผใ‚ญใƒ†ใ‚ฏใƒใƒฃใ‚’ๅฎš็พฉใ™ใ‚‹ใŸใ‚ใซไฝฟ็”จใ•ใ‚Œใพใ™ใ€‚ ใ™ในใฆใฎใƒขใƒ‡ใƒซใฏ [`PreTrainedModel`] ใ‚’ใƒ™ใƒผใ‚นใ‚ฏใƒฉใ‚นใจใ—ใ€ๅ…ฅๅŠ›ๅŸ‹ใ‚่พผใฟใฎใƒชใ‚ตใ‚คใ‚บใ‚„ใ‚ปใƒซใƒ•ใ‚ขใƒ†ใƒณใ‚ทใƒงใƒณใƒ˜ใƒƒใƒ‰ใฎใƒ—ใƒซใƒผใƒ‹ใƒณใ‚ฐใชใฉใ€ๅ…ฑ้€šใฎใƒกใ‚ฝใƒƒใƒ‰ใŒใ„ใใคใ‹ใ‚ใ‚Šใพใ™ใ€‚ ใ•ใ‚‰ใซใ€ใ™ในใฆใฎใƒขใƒ‡ใƒซใฏ 
[`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)ใ€[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)ใ€ใพใŸใฏ [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) ใฎใ„ใšใ‚Œใ‹ใฎใ‚ตใƒ–ใ‚ฏใƒฉใ‚นใงใ‚‚ใ‚ใ‚Šใพใ™ใ€‚ใคใพใ‚Šใ€ใƒขใƒ‡ใƒซใฏใใ‚Œใžใ‚Œใฎใƒ•ใƒฌใƒผใƒ ใƒฏใƒผใ‚ฏใฎไฝฟ็”จๆณ•ใจไบ’ๆ›ๆ€งใŒใ‚ใ‚Šใพใ™ใ€‚ <frameworkcontent> <pt> ใƒขใƒ‡ใƒซใซใ‚ซใ‚นใ‚ฟใƒ ๆง‹ๆˆๅฑžๆ€งใ‚’ใƒญใƒผใƒ‰ใ—ใพใ™๏ผš ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") >>> model = DistilBertModel(my_config) ``` ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ไบ‹ๅ‰ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐๆธˆใฟใฎ้‡ใฟใงใฏใชใใƒฉใƒณใƒ€ใƒ ใชๅ€คใ‚’ๆŒใคใƒขใƒ‡ใƒซใŒไฝœๆˆใ•ใ‚Œใพใ™ใ€‚ ใ“ใ‚Œใฏใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใŒ่กŒใ‚ใ‚Œใ‚‹ใพใงใ€ใพใ ๆœ‰็”จใชใ‚‚ใฎใจใ—ใฆไฝฟ็”จใ™ใ‚‹ใ“ใจใฏใงใใพใ›ใ‚“ใ€‚ ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใฏใ‚ณใ‚นใƒˆใจๆ™‚้–“ใŒใ‹ใ‹ใ‚‹ใƒ—ใƒญใ‚ปใ‚นใงใ™ใ€‚ ้€šๅธธใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใซๅฟ…่ฆใชใƒชใ‚ฝใƒผใ‚นใฎไธ€้ƒจใ—ใ‹ไฝฟ็”จใ›ใšใ€ใ‚ˆใ‚Š้€Ÿใใ‚ˆใ‚Š่‰ฏใ„็ตๆžœใ‚’ๅพ—ใ‚‹ใŸใ‚ใซไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใŒ่‰ฏใ„ใงใ—ใ‚‡ใ†ใ€‚ [`~PreTrainedModel.from_pretrained`]ใ‚’ไฝฟ็”จใ—ใฆไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ไฝœๆˆใ—ใพใ™๏ผš ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใฎ้‡ใฟใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹้š›ใ€ใƒขใƒ‡ใƒซใŒ๐Ÿค— Transformersใซใ‚ˆใฃใฆๆไพ›ใ•ใ‚Œใฆใ„ใ‚‹ๅ ดๅˆใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใƒขใƒ‡ใƒซ่จญๅฎšใŒ่‡ชๅ‹•็š„ใซใƒญใƒผใƒ‰ใ•ใ‚Œใพใ™ใ€‚ใŸใ ใ—ใ€ๅฟ…่ฆใซๅฟœใ˜ใฆใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใƒขใƒ‡ใƒซ่จญๅฎšๅฑžๆ€งใฎไธ€้ƒจใพใŸใฏใ™ในใฆใ‚’็‹ฌ่‡ชใฎใ‚‚ใฎใง็ฝฎใๆ›ใˆใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> ใƒขใƒ‡ใƒซใซใ‚ซใ‚นใ‚ฟใƒ ่จญๅฎšๅฑžๆ€งใ‚’ใƒญใƒผใƒ‰ใ—ใฆใใ ใ•ใ„๏ผš ```py >>> from transformers import 
TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใฎ้‡ใฟใงใฏใชใใƒฉใƒณใƒ€ใƒ ใชๅ€คใ‚’ๆŒใคใƒขใƒ‡ใƒซใŒไฝœๆˆใ•ใ‚Œใพใ™ใ€‚ ใ“ใฎใƒขใƒ‡ใƒซใ‚’ๆœ‰็”จใช็›ฎ็š„ใซใฏใพใ ไฝฟ็”จใ™ใ‚‹ใ“ใจใฏใงใใพใ›ใ‚“ใ€‚ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใฏใ‚ณใ‚นใƒˆใŒใ‹ใ‹ใ‚Šใ€ๆ™‚้–“ใŒใ‹ใ‹ใ‚‹ใƒ—ใƒญใ‚ปใ‚นใงใ™ใ€‚ ไธ€่ˆฌ็š„ใซใฏใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใซๅฟ…่ฆใชใƒชใ‚ฝใƒผใ‚นใฎไธ€้ƒจใ—ใ‹ไฝฟ็”จใ›ใšใซใ€ใ‚ˆใ‚Š้€Ÿใๅ„ชใ‚ŒใŸ็ตๆžœใ‚’ๅพ—ใ‚‹ใŸใ‚ใซไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใŒ่‰ฏใ„ใงใ—ใ‚‡ใ†ใ€‚ [`~TFPreTrainedModel.from_pretrained`]ใ‚’ไฝฟ็”จใ—ใฆไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ไฝœๆˆใ—ใพใ™๏ผš ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใฎ้‡ใฟใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹้š›ใ€ใƒขใƒ‡ใƒซใŒ๐Ÿค— Transformersใซใ‚ˆใฃใฆๆไพ›ใ•ใ‚Œใฆใ„ใ‚‹ๅ ดๅˆใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใƒขใƒ‡ใƒซๆง‹ๆˆใŒ่‡ชๅ‹•็š„ใซใƒญใƒผใƒ‰ใ•ใ‚Œใพใ™ใ€‚ใŸใ ใ—ใ€ๅฟ…่ฆใงใ‚ใ‚Œใฐใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใƒขใƒ‡ใƒซๆง‹ๆˆๅฑžๆ€งใฎไธ€้ƒจใพใŸใฏใ™ในใฆใ‚’็‹ฌ่‡ชใฎใ‚‚ใฎใง็ฝฎใๆ›ใˆใ‚‹ใ“ใจใ‚‚ใงใใพใ™๏ผš ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### Model heads ใ“ใฎๆ™‚็‚นใงใ€ใƒ™ใƒผใ‚นใฎDistilBERTใƒขใƒ‡ใƒซใŒใ‚ใ‚Šใ€ใ“ใ‚Œใฏ้š ใ‚ŒใŸ็Šถๆ…‹ใ‚’ๅ‡บๅŠ›ใ—ใพใ™ใ€‚้š ใ‚ŒใŸ็Šถๆ…‹ใฏใƒขใƒ‡ใƒซใฎใƒ˜ใƒƒใƒ‰ใธใฎๅ…ฅๅŠ›ใจใ—ใฆๆธกใ•ใ‚Œใ€ๆœ€็ต‚็š„ใชๅ‡บๅŠ›ใ‚’็”Ÿๆˆใ—ใพใ™ใ€‚๐Ÿค— Transformersใฏใ€ใƒขใƒ‡ใƒซใŒใใฎใ‚ฟใ‚นใ‚ฏใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใ‚‹้™ใ‚Šใ€ๅ„ใ‚ฟใ‚นใ‚ฏใซๅฏพๅฟœใ™ใ‚‹็•ฐใชใ‚‹ใƒขใƒ‡ใƒซใƒ˜ใƒƒใƒ‰ใ‚’ๆไพ›ใ—ใพใ™๏ผˆใคใพใ‚Šใ€DistilBERTใ‚’็ฟป่จณใฎใ‚ˆใ†ใชใ‚ทใƒผใ‚ฑใƒณใ‚นๅฏพใ‚ทใƒผใ‚ฑใƒณใ‚นใ‚ฟใ‚นใ‚ฏใซไฝฟ็”จใ™ใ‚‹ใ“ใจใฏใงใใพใ›ใ‚“๏ผ‰ใ€‚ <frameworkcontent> <pt> 
ใŸใจใˆใฐใ€[`DistilBertForSequenceClassification`]ใฏใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใ‚’ๆŒใคใƒ™ใƒผใ‚นใฎDistilBERTใƒขใƒ‡ใƒซใงใ™ใ€‚ใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใฏใ€ใƒ—ใƒผใƒซใ•ใ‚ŒใŸๅ‡บๅŠ›ใฎไธŠใซใ‚ใ‚‹็ทšๅฝขๅฑคใงใ™ใ€‚ ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` ๆ–ฐใ—ใ„ใ‚ฟใ‚นใ‚ฏใซใ“ใฎใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใ‚’็ฐกๅ˜ใซๅ†ๅˆฉ็”จใ™ใ‚‹ใซใฏใ€็•ฐใชใ‚‹ใƒขใƒ‡ใƒซใƒ˜ใƒƒใƒ‰ใซๅˆ‡ใ‚Šๆ›ฟใˆใพใ™ใ€‚ ่ณชๅ•ๅฟœ็ญ”ใ‚ฟใ‚นใ‚ฏใฎๅ ดๅˆใ€[`DistilBertForQuestionAnswering`] ใƒขใƒ‡ใƒซใƒ˜ใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ ่ณชๅ•ๅฟœ็ญ”ใƒ˜ใƒƒใƒ‰ใฏใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใจ้กžไผผใ—ใฆใ„ใพใ™ใŒใ€้š ใ‚Œ็Šถๆ…‹ใฎๅ‡บๅŠ›ใฎไธŠใซ็ทšๅฝขๅฑคใŒใ‚ใ‚Šใพใ™ใ€‚ ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> ไพ‹ใˆใฐใ€[`TFDistilBertForSequenceClassification`]ใฏใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใ‚’ๆŒใคใƒ™ใƒผใ‚นใฎDistilBERTใƒขใƒ‡ใƒซใงใ™ใ€‚ใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใฏใ€ใƒ—ใƒผใƒซใ•ใ‚ŒใŸๅ‡บๅŠ›ใฎไธŠใซใ‚ใ‚‹็ทšๅฝขๅฑคใงใ™ใ€‚ ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` ๅˆฅใฎใ‚ฟใ‚นใ‚ฏใซใ“ใฎใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใ‚’็ฐกๅ˜ใซๅ†ๅˆฉ็”จใ™ใ‚‹ใ“ใจใŒใงใใ€็•ฐใชใ‚‹ใƒขใƒ‡ใƒซใƒ˜ใƒƒใƒ‰ใซๅˆ‡ใ‚Šๆ›ฟใˆใ‚‹ใ ใ‘ใงใ™ใ€‚ ่ณชๅ•ๅฟœ็ญ”ใ‚ฟใ‚นใ‚ฏใฎๅ ดๅˆใ€[`TFDistilBertForQuestionAnswering`]ใƒขใƒ‡ใƒซใƒ˜ใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ ่ณชๅ•ๅฟœ็ญ”ใƒ˜ใƒƒใƒ‰ใฏใ‚ทใƒผใ‚ฑใƒณใ‚นๅˆ†้กžใƒ˜ใƒƒใƒ‰ใจไผผใฆใ„ใพใ™ใŒใ€้š ใ‚Œ็Šถๆ…‹ใฎๅ‡บๅŠ›ใฎไธŠใซ็ทšๅฝขๅฑคใŒใ‚ใ‚‹ใ ใ‘ใงใ™ใ€‚ ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> 
## Tokenizer ใƒ†ใ‚ญใ‚นใƒˆใƒ‡ใƒผใ‚ฟใ‚’ใƒขใƒ‡ใƒซใงไฝฟ็”จใ™ใ‚‹ๅ‰ใซๅฟ…่ฆใชๆœ€ๅพŒใฎใƒ™ใƒผใ‚นใ‚ฏใƒฉใ‚นใฏใ€็”Ÿใฎใƒ†ใ‚ญใ‚นใƒˆใ‚’ใƒ†ใƒณใ‚ฝใƒซใซๅค‰ๆ›ใ™ใ‚‹ใŸใ‚ใฎ[ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถ](main_classes/tokenizer)ใงใ™ใ€‚ ๐Ÿค— Transformersใงไฝฟ็”จใงใใ‚‹2ใคใฎใ‚ฟใ‚คใƒ—ใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใŒใ‚ใ‚Šใพใ™๏ผš - [`PreTrainedTokenizer`]: ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใฎPythonๅฎŸ่ฃ…ใงใ™ใ€‚ - [`PreTrainedTokenizerFast`]: Rustใƒ™ใƒผใ‚นใฎ[๐Ÿค— Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/)ใƒฉใ‚คใƒ–ใƒฉใƒชใ‹ใ‚‰ใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใงใ™ใ€‚ ใ“ใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใฎใ‚ฟใ‚คใƒ—ใฏใ€ใใฎRustๅฎŸ่ฃ…ใซใ‚ˆใ‚Šใ€็‰นใซใƒใƒƒใƒใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ผใƒผใ‚ทใƒงใƒณไธญใซ้ซ˜้€Ÿใงใ™ใ€‚ ้ซ˜้€Ÿใชใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใฏใ€ใƒˆใƒผใ‚ฏใƒณใ‚’ๅ…ƒใฎๅ˜่ชžใพใŸใฏๆ–‡ๅญ—ใซใƒžใƒƒใƒ”ใƒณใ‚ฐใ™ใ‚‹*ใ‚ชใƒ•ใ‚ปใƒƒใƒˆใƒžใƒƒใƒ”ใƒณใ‚ฐ*ใชใฉใฎ่ฟฝๅŠ ใƒกใ‚ฝใƒƒใƒ‰ใ‚‚ๆไพ›ใ—ใพใ™ใ€‚ ไธกๆ–นใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใฏใ€ใ‚จใƒณใ‚ณใƒผใƒ‰ใจใƒ‡ใ‚ณใƒผใƒ‰ใ€ๆ–ฐใ—ใ„ใƒˆใƒผใ‚ฏใƒณใฎ่ฟฝๅŠ ใ€็‰นๅˆฅใชใƒˆใƒผใ‚ฏใƒณใฎ็ฎก็†ใชใฉใ€ๅ…ฑ้€šใฎใƒกใ‚ฝใƒƒใƒ‰ใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใพใ™ใ€‚ <Tip warning={true}> ใ™ในใฆใฎใƒขใƒ‡ใƒซใŒ้ซ˜้€Ÿใชใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใ‚‹ใ‚ใ‘ใงใฏใ‚ใ‚Šใพใ›ใ‚“ใ€‚ ใƒขใƒ‡ใƒซใŒ้ซ˜้€Ÿใชใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใ‚‹ใ‹ใฉใ†ใ‹ใ‚’็ขบ่ชใ™ใ‚‹ใซใฏใ€ใ“ใฎ[่กจ](index#supported-frameworks)ใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚ </Tip> ็‹ฌ่‡ชใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ—ใŸๅ ดๅˆใ€*ใƒœใ‚ญใƒฃใƒ–ใƒฉใƒชใƒผ*ใƒ•ใ‚กใ‚คใƒซใ‹ใ‚‰ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ไฝœๆˆใงใใพใ™ใ€‚ ```py >>> from transformers import DistilBertTokenizer >>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` ใ‚ซใ‚นใ‚ฟใƒ ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใƒผใ‹ใ‚‰็”Ÿๆˆใ•ใ‚Œใ‚‹่ชžๅฝ™ใฏใ€ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใฎใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใƒผใŒ็”Ÿๆˆใ™ใ‚‹่ชžๅฝ™ใจใฏ็•ฐใชใ‚‹ใ“ใจใ‚’่ฆšใˆใฆใŠใใ“ใจใฏ้‡่ฆใงใ™ใ€‚ ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ๅ 
ดๅˆใฏใ€ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใฎ่ชžๅฝ™ใ‚’ไฝฟ็”จใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ใใ†ใ—ใชใ„ใจใ€ๅ…ฅๅŠ›ใŒๆ„ๅ‘ณใ‚’ใชใ•ใชใใชใ‚Šใพใ™ใ€‚ [`DistilBertTokenizer`]ใ‚ฏใƒฉใ‚นใ‚’ไฝฟ็”จใ—ใฆใ€ไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใฎ่ชžๅฝ™ใ‚’ๆŒใคใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใƒผใ‚’ไฝœๆˆใ—ใพใ™: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` [`DistilBertTokenizerFast`]ใ‚ฏใƒฉใ‚นใ‚’ไฝฟ็”จใ—ใฆ้ซ˜้€Ÿใชใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ไฝœๆˆใ—ใพใ™๏ผš ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใฏใ€[`AutoTokenizer`]ใฏ้ซ˜้€Ÿใชใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’่ชญใฟ่พผใ‚‚ใ†ใจใ—ใพใ™ใ€‚`from_pretrained`ๅ†…ใง`use_fast=False`ใ‚’่จญๅฎšใ™ใ‚‹ใ“ใจใงใ€ใ“ใฎๅ‹•ไฝœใ‚’็„กๅŠนใซใ™ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ </Tip> ## Image Processor ็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใฏใƒ“ใ‚ธใƒงใƒณๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ—ใพใ™ใ€‚ใ“ใ‚ŒใฏๅŸบๆœฌใ‚ฏใƒฉใ‚น [`~image_processing_utils.ImageProcessingMixin`] ใ‚’็ถ™ๆ‰ฟใ—ใฆใ„ใพใ™ใ€‚ ไฝฟ็”จใ™ใ‚‹ใซใฏใ€ไฝฟ็”จใ—ใฆใ„ใ‚‹ใƒขใƒ‡ใƒซใซ้–ข้€ฃไป˜ใ‘ใ‚‰ใ‚ŒใŸ็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ ใŸใจใˆใฐใ€็”ปๅƒๅˆ†้กžใซ[ViT](model_doc/vit)ใ‚’ไฝฟ็”จใ™ใ‚‹ๅ ดๅˆใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎ [`ViTImageProcessor`] ใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ ```py >>> from transformers import ViTImageProcessor >>> vit_extractor = ViTImageProcessor() >>> print(vit_extractor) ViTImageProcessor { "do_normalize": true, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> ใ‚ซใ‚นใ‚ฟใƒžใ‚คใ‚บใ‚’ๅฟ…่ฆใจใ—ใชใ„ๅ ดๅˆใ€ใƒขใƒ‡ใƒซใฎใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎ็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใƒ‘ใƒฉใƒกใƒผใ‚ฟใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹ใซใฏใ€ๅ˜็ด”ใซ`from_pretrained`ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใฆใใ ใ•ใ„ใ€‚ </Tip> [`ViTImageProcessor`]ใฎใƒ‘ใƒฉใƒกใƒผใ‚ฟใ‚’ๅค‰ๆ›ดใ—ใฆใ€ใ‚ซใ‚นใ‚ฟใƒ 
ใฎ็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ไฝœๆˆใงใใพใ™๏ผš ```py >>> from transformers import ViTImageProcessor >>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTImageProcessor { "do_normalize": false, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` ## Feature Extractor ใƒ•ใ‚ฃใƒผใƒใƒฃใƒผๆŠฝๅ‡บๅ™จใฏ้Ÿณๅฃฐๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ—ใพใ™ใ€‚ใ“ใ‚ŒใฏๅŸบๆœฌ็š„ใช [`~feature_extraction_utils.FeatureExtractionMixin`] ใ‚ฏใƒฉใ‚นใ‹ใ‚‰็ถ™ๆ‰ฟใ•ใ‚Œใ€้Ÿณๅฃฐๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ™ใ‚‹ใŸใ‚ใฎ [`SequenceFeatureExtractor`] ใ‚ฏใƒฉใ‚นใ‹ใ‚‰ใ‚‚็ถ™ๆ‰ฟใ•ใ‚Œใ‚‹ใ“ใจใŒใ‚ใ‚Šใพใ™ใ€‚ ไฝฟ็”จใ™ใ‚‹ใซใฏใ€ใƒขใƒ‡ใƒซใซ้–ข้€ฃไป˜ใ‘ใ‚‰ใ‚ŒใŸใƒ•ใ‚ฃใƒผใƒใƒฃใƒผๆŠฝๅ‡บๅ™จใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ใŸใจใˆใฐใ€้Ÿณๅฃฐๅˆ†้กžใซ [Wav2Vec2](model_doc/wav2vec2) ใ‚’ไฝฟ็”จใ™ใ‚‹ๅ ดๅˆใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎ [`Wav2Vec2FeatureExtractor`] ใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` <Tip> ใ‚ซใ‚นใ‚ฟใƒžใ‚คใ‚บใ‚’่กŒใ‚ใชใ„ๅ ดๅˆใ€ใƒขใƒ‡ใƒซใฎใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎ็‰นๅพดๆŠฝๅ‡บๅ™จใƒ‘ใƒฉใƒกใƒผใ‚ฟใƒผใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹ใซใฏใ€ๅ˜ใซ `from_pretrained` ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใฆใใ ใ•ใ„ใ€‚ </Tip> [`Wav2Vec2FeatureExtractor`] ใฎใƒ‘ใƒฉใƒกใƒผใ‚ฟใƒผใ‚’ๅค‰ๆ›ดใ—ใฆใ€ใ‚ซใ‚นใ‚ฟใƒ ็‰นๅพดๆŠฝๅ‡บๅ™จใ‚’ไฝœๆˆใงใใพใ™: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False) >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": false, "feature_extractor_type": 
"Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 8000 } ``` ## Processor ใƒžใƒซใƒใƒขใƒผใƒ€ใƒซใ‚ฟใ‚นใ‚ฏใ‚’ใ‚ตใƒใƒผใƒˆใ™ใ‚‹ใƒขใƒ‡ใƒซใซๅฏพใ—ใฆใ€๐Ÿค— Transformersใฏไพฟๅˆฉใชใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚ฏใƒฉใ‚นใ‚’ๆไพ›ใ—ใฆใ„ใพใ™ใ€‚ ใ“ใฎใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚ฏใƒฉใ‚นใฏใ€็‰นๅพด้‡ๆŠฝๅ‡บๅ™จใ‚„ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใชใฉใฎๅ‡ฆ็†ใ‚ฏใƒฉใ‚นใ‚’ไพฟๅˆฉใซใƒฉใƒƒใƒ—ใ—ใ€ๅ˜ไธ€ใฎใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใซ็ตๅˆใ—ใพใ™ใ€‚ ใŸใจใˆใฐใ€่‡ชๅ‹•้Ÿณๅฃฐ่ช่ญ˜ใ‚ฟใ‚นใ‚ฏ๏ผˆASR๏ผ‰็”จใซ[`Wav2Vec2Processor`]ใ‚’ไฝฟ็”จใ—ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ ASRใฏ้Ÿณๅฃฐใ‚’ใƒ†ใ‚ญใ‚นใƒˆใซ่ปขๅ†™ใ™ใ‚‹ใ‚ฟใ‚นใ‚ฏใงใ‚ใ‚Šใ€้Ÿณๅฃฐๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ™ใ‚‹ใŸใ‚ใซ็‰นๅพด้‡ๆŠฝๅ‡บๅ™จใจใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใŒๅฟ…่ฆใงใ™ใ€‚ ้Ÿณๅฃฐๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ™ใ‚‹็‰นๅพด้‡ๆŠฝๅ‡บๅ™จใ‚’ไฝœๆˆใ—ใพใ™๏ผš ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` ใƒ†ใ‚ญใ‚นใƒˆๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ™ใ‚‹ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’ไฝœๆˆใ—ใพใ™: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` [`Wav2Vec2Processor`]ใง็‰นๅพด้‡ๆŠฝๅ‡บๅ™จใจใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ‚’็ต„ใฟๅˆใ‚ใ›ใพใ™๏ผš ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` ไบŒใคใฎๅŸบๆœฌใ‚ฏใƒฉใ‚น - ่จญๅฎšใจใƒขใƒ‡ใƒซ - ใŠใ‚ˆใณ่ฟฝๅŠ ใฎๅ‰ๅ‡ฆ็†ใ‚ฏใƒฉใ‚น๏ผˆใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใ€็”ปๅƒใƒ—ใƒญใ‚ปใƒƒใ‚ตใ€็‰นๅพดๆŠฝๅ‡บๅ™จใ€ใพใŸใฏใƒ—ใƒญใ‚ปใƒƒใ‚ต๏ผ‰ใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใงใ€๐Ÿค— Transformers ใŒใ‚ตใƒใƒผใƒˆใ™ใ‚‹ใƒขใƒ‡ใƒซใฎใ„ใšใ‚Œใ‹ใ‚’ไฝœๆˆใงใใพใ™ใ€‚ใ“ใ‚Œใ‚‰ใฎๅŸบๆœฌใ‚ฏใƒฉใ‚นใฏ่จญๅฎšๅฏ่ƒฝใงใ€ๅฟ…่ฆใช็‰นๆ€งใ‚’ไฝฟ็”จใงใใพใ™ใ€‚ใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ็”จใซ็ฐกๅ˜ใซใ‚ปใƒƒใƒˆใ‚ขใƒƒใƒ—ใ—ใŸใ‚Šใ€ๆ—ขๅญ˜ใฎไบ‹ๅ‰ๅญฆ็ฟ’ๆธˆใฟใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใ™ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚
transformers/docs/source/ja/create_a_model.md/0
{ "file_path": "transformers/docs/source/ja/create_a_model.md", "repo_id": "transformers", "token_count": 8236 }
421
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantize ๐Ÿค— Transformers models ## `AutoGPTQ` Integration ๐Ÿค— Transformers ใซใฏใ€่จ€่ชžใƒขใƒ‡ใƒซใง GPTQ ้‡ๅญๅŒ–ใ‚’ๅฎŸ่กŒใ™ใ‚‹ใŸใ‚ใฎ `optimum` API ใŒ็ตฑๅˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใ‚’ๅคงๅน…ใซไฝŽไธ‹ใ•ใ›ใ‚‹ใ“ใจใชใใ€ๆŽจ่ซ–้€Ÿๅบฆใ‚’้ซ˜้€ŸๅŒ–ใ™ใ‚‹ใ“ใจใชใใ€ใƒขใƒ‡ใƒซใ‚’ 8ใ€4ใ€3ใ€ใ•ใ‚‰ใซใฏ 2 ใƒ“ใƒƒใƒˆใงใƒญใƒผใƒ‰ใŠใ‚ˆใณ้‡ๅญๅŒ–ใงใใพใ™ใ€‚ใ“ใ‚Œใฏใ€ใปใจใ‚“ใฉใฎ GPU ใƒใƒผใƒ‰ใ‚ฆใ‚งใ‚ขใงใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ ้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใฎ่ฉณ็ดฐใซใคใ„ใฆใฏใ€ไปฅไธ‹ใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ - [GPTQ](https://huggingface.co/papers/2210.17323) ่ซ–ๆ–‡ - GPTQ ้‡ๅญๅŒ–ใซ้–ขใ™ใ‚‹ `optimum` [ใ‚ฌใ‚คใƒ‰](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) - ใƒใƒƒใ‚ฏใ‚จใƒณใƒ‰ใจใ—ใฆไฝฟ็”จใ•ใ‚Œใ‚‹ [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) ใƒฉใ‚คใƒ–ใƒฉใƒช ### Requirements ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ใ‚’ๅฎŸ่กŒใ™ใ‚‹ใซใฏใ€ไปฅไธ‹ใฎ่ฆไปถใŒใ‚คใƒณใ‚นใƒˆใƒผใƒซใ•ใ‚Œใฆใ„ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™๏ผš - ๆœ€ๆ–ฐใฎ `AutoGPTQ` ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ `pip install auto-gptq` ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ - ๆœ€ๆ–ฐใฎ `optimum` ใ‚’ใ‚ฝใƒผใ‚นใ‹ใ‚‰ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ `git+https://github.com/huggingface/optimum.git` 
ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ - ๆœ€ๆ–ฐใฎ `transformers` ใ‚’ใ‚ฝใƒผใ‚นใ‹ใ‚‰ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ ๆœ€ๆ–ฐใฎ `transformers` ใ‚’ใ‚ฝใƒผใ‚นใ‹ใ‚‰ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ `pip install git+https://github.com/huggingface/transformers.git` - ๆœ€ๆ–ฐใฎ `accelerate` ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ€‚ `pip install --upgrade accelerate` ใ‚’ๅฎŸ่กŒใ™ใ‚‹ใ€‚ GPTQ็ตฑๅˆใฏไปŠใฎใจใ“ใ‚ใƒ†ใ‚ญใ‚นใƒˆใƒขใƒ‡ใƒซใฎใฟใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใ‚‹ใฎใงใ€่ฆ–่ฆšใ€้Ÿณๅฃฐใ€ใƒžใƒซใƒใƒขใƒผใƒ€ใƒซใƒขใƒ‡ใƒซใงใฏไบˆๆœŸใ›ใฌๆŒ™ๅ‹•ใซ้ญ้‡ใ™ใ‚‹ใ‹ใ‚‚ใ—ใ‚Œใชใ„ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ ### Load and quantize a model GPTQ ใฏใ€้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ๅ‰ใซ้‡ใฟใฎใ‚ญใƒฃใƒชใƒ–ใƒฌใƒผใ‚ทใƒงใƒณใ‚’ๅฟ…่ฆใจใ™ใ‚‹้‡ๅญๅŒ–ๆ–นๆณ•ใงใ™ใ€‚ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ ใƒขใƒ‡ใƒซใ‚’ๆœ€ๅˆใ‹ใ‚‰้‡ๅญๅŒ–ใ™ใ‚‹ๅ ดๅˆใฏใ€้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ไฝœๆˆใ™ใ‚‹ใพใงใซๆ™‚้–“ใŒใ‹ใ‹ใ‚‹ใ“ใจใŒใ‚ใ‚Šใพใ™ (`facebook/opt-350m`ใƒขใƒ‡ใƒซใฎ Google colab ใงใฏ็ด„ 5 ๅˆ†)ใ€‚ ใ—ใŸใŒใฃใฆใ€GPTQ ้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ใ‚ทใƒŠใƒชใ‚ชใฏ 2 ใคใ‚ใ‚Šใพใ™ใ€‚ๆœ€ๅˆใฎไฝฟ็”จไพ‹ใฏใ€ใƒใƒ–ใงๅˆฉ็”จๅฏ่ƒฝใชไป–ใฎใƒฆใƒผใ‚ถใƒผใซใ‚ˆใฃใฆใ™ใงใซ้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹ใ“ใจใงใ™ใ€‚2 ็•ช็›ฎใฎไฝฟ็”จไพ‹ใฏใ€ใƒขใƒ‡ใƒซใ‚’ๆœ€ๅˆใ‹ใ‚‰้‡ๅญๅŒ–ใ—ใ€ไฟๅญ˜ใ™ใ‚‹ใ‹ใƒใƒ–ใซใƒ—ใƒƒใ‚ทใƒฅใ—ใฆใ€ไป–ใฎใƒฆใƒผใ‚ถใƒผใŒไฝฟ็”จใงใใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใ“ใจใงใ™ใ€‚ใใ‚Œใ‚‚ไฝฟใฃใฆใใ ใ•ใ„ใ€‚ #### GPTQ Configuration ใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ—ใฆ้‡ๅญๅŒ–ใ™ใ‚‹ใซใฏใ€[`GPTQConfig`] ใ‚’ไฝœๆˆใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’ๆบ–ๅ‚™ใ™ใ‚‹ใซใฏใ€`bits`ใฎๆ•ฐใ€้‡ๅญๅŒ–ใ‚’่ชฟๆ•ดใ™ใ‚‹ใŸใ‚ใฎ`dataset`ใ€ใŠใ‚ˆใณใƒขใƒ‡ใƒซใฎ`Tokenizer`ใ‚’ๆธกใ™ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ ```python model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer) ``` 
็‹ฌ่‡ชใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’ๆ–‡ๅญ—ๅˆ—ใฎใƒชใ‚นใƒˆใจใ—ใฆๆธกใ™ใ“ใจใŒใงใใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใŸใ ใ—ใ€GPTQ ่ซ–ๆ–‡ใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใ‚’ๅผทใใŠๅ‹งใ‚ใ—ใพใ™ใ€‚ ```python dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."] quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer) ``` #### Quantization `from_pretrained` ใ‚’ไฝฟ็”จใ—ใ€`quantization_config` ใ‚’่จญๅฎšใ™ใ‚‹ใ“ใจใงใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใงใใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config) ``` ใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใ™ใ‚‹ใซใฏ GPU ใŒๅฟ…่ฆใงใ‚ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใƒขใƒ‡ใƒซใ‚’ CPU ใซ้…็ฝฎใ—ใ€้‡ๅญๅŒ–ใ™ใ‚‹ใŸใ‚ใซใƒขใ‚ธใƒฅใƒผใƒซใ‚’ GPU ใซๅ‰ๅพŒใซ็งปๅ‹•ใ•ใ›ใพใ™ใ€‚ CPU ใ‚ชใƒ•ใƒญใƒผใƒ‰ใฎไฝฟ็”จไธญใซ GPU ใฎไฝฟ็”จ้‡ใ‚’ๆœ€ๅคงๅŒ–ใ—ใŸใ„ๅ ดๅˆใฏใ€`device_map = "auto"` ใ‚’่จญๅฎšใงใใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) ``` ใƒ‡ใ‚ฃใ‚นใ‚ฏ ใ‚ชใƒ•ใƒญใƒผใƒ‰ใฏใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใชใ„ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใ•ใ‚‰ใซใ€ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใŒๅŽŸๅ› ใงใƒกใƒขใƒชใŒไธ่ถณใ—ใฆใ„ใ‚‹ๅ ดๅˆใฏใ€`from_pretained` ใง `max_memory` ใ‚’ๆธกใ™ๅฟ…่ฆใŒใ‚ใ‚‹ๅ ดๅˆใŒใ‚ใ‚Šใพใ™ใ€‚ `device_map`ใจ`max_memory`ใฎ่ฉณ็ดฐใซใคใ„ใฆใฏใ€ใ“ใฎ [ใ‚ฌใ‚คใƒ‰](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ <Tip warning={true}> GPTQ ้‡ๅญๅŒ–ใฏใ€็พๆ™‚็‚นใงใฏใƒ†ใ‚ญใ‚นใƒˆ ใƒขใƒ‡ใƒซใงใฎใฟๆฉŸ่ƒฝใ—ใพใ™ใ€‚ใ•ใ‚‰ใซใ€้‡ๅญๅŒ–ใƒ—ใƒญใ‚ปใ‚นใฏใƒใƒผใƒ‰ใ‚ฆใ‚งใ‚ขใซใ‚ˆใฃใฆใฏ้•ทๆ™‚้–“ใ‹ใ‹ใ‚‹ๅ ดๅˆใŒใ‚ใ‚Šใพใ™ (NVIDIA A100 ใ‚’ไฝฟ็”จใ—ใŸๅ ดๅˆใ€175B ใƒขใƒ‡ใƒซ = 4 gpu ๆ™‚้–“)ใ€‚ใƒขใƒ‡ใƒซใฎ GPTQ 
้‡ๅญๅŒ–ใƒใƒผใ‚ธใƒงใƒณใŒๅญ˜ๅœจใ—ใชใ„ๅ ดๅˆใฏใ€ใƒใƒ–ใง็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ใใ†ใงใชใ„ๅ ดๅˆใฏใ€github ใง่ฆๆฑ‚ใ‚’้€ไฟกใงใใพใ™ใ€‚ </Tip> ### Push quantized model to ๐Ÿค— Hub ไป–ใฎ ๐Ÿค— ใƒขใƒ‡ใƒซใจๅŒๆง˜ใซใ€`push_to_hub` ใ‚’ไฝฟ็”จใ—ใฆ้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ใƒใƒ–ใซใƒ—ใƒƒใ‚ทใƒฅใงใใพใ™ใ€‚้‡ๅญๅŒ–ๆง‹ๆˆใฏไฟๅญ˜ใ•ใ‚Œใ€ใƒขใƒ‡ใƒซใซๆฒฟใฃใฆใƒ—ใƒƒใ‚ทใƒฅใ•ใ‚Œใพใ™ใ€‚ ```python quantized_model.push_to_hub("opt-125m-gptq") tokenizer.push_to_hub("opt-125m-gptq") ``` ้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใ‚ซใƒซ ใƒžใ‚ทใƒณใซไฟๅญ˜ใ—ใŸใ„ๅ ดๅˆใฏใ€`save_pretrained` ใ‚’ไฝฟ็”จใ—ใฆ่กŒใ†ใ“ใจใ‚‚ใงใใพใ™ใ€‚ ```python quantized_model.save_pretrained("opt-125m-gptq") tokenizer.save_pretrained("opt-125m-gptq") ``` `device_map` ใ‚’ไฝฟ็”จใ—ใฆใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใ—ใŸๅ ดๅˆใฏใ€ไฟๅญ˜ใ™ใ‚‹ๅ‰ใซใƒขใƒ‡ใƒซๅ…จไฝ“ใ‚’ GPU ใพใŸใฏ `cpu` ใฎใ„ใšใ‚Œใ‹ใซ็งปๅ‹•ใ—ใฆใใ ใ•ใ„ใ€‚ ```python quantized_model.to("cpu") quantized_model.save_pretrained("opt-125m-gptq") ``` ### Load a quantized model from the ๐Ÿค— Hub `from_pretrained`ใ‚’ไฝฟ็”จใ—ใฆใ€้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ใƒใƒ–ใ‹ใ‚‰ใƒญใƒผใƒ‰ใงใใพใ™ใ€‚ ๅฑžๆ€ง `quantization_config` ใŒใƒขใƒ‡ใƒซ่จญๅฎšใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใซๅญ˜ๅœจใ™ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใ€ใƒ—ใƒƒใ‚ทใƒฅใ•ใ‚ŒใŸ้‡ใฟใŒ้‡ๅญๅŒ–ใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq") ``` ๅฟ…่ฆไปฅไธŠใฎใƒกใƒขใƒชใ‚’ๅ‰ฒใ‚Šๅฝ“ใฆใšใซใƒขใƒ‡ใƒซใ‚’ใ‚ˆใ‚Š้€Ÿใใƒญใƒผใƒ‰ใ—ใŸใ„ๅ ดๅˆใฏใ€`device_map` ๅผ•ๆ•ฐใฏ้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใงใ‚‚ๆฉŸ่ƒฝใ—ใพใ™ใ€‚ `accelerate`ใƒฉใ‚คใƒ–ใƒฉใƒชใŒใ‚คใƒณใ‚นใƒˆใƒผใƒซใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto") ``` ### Exllama kernels for faster inference 4 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใฎๅ 
ดๅˆใ€ๆŽจ่ซ–้€Ÿๅบฆใ‚’้ซ˜ใ‚ใ‚‹ใŸใ‚ใซ exllama ใ‚ซใƒผใƒใƒซใ‚’ไฝฟ็”จใงใใพใ™ใ€‚ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงๆœ‰ๅŠนใซใชใฃใฆใ„ใพใ™ใ€‚ [`GPTQConfig`] ใง `disable_exllama` ใ‚’ๆธกใ™ใ“ใจใงใ€ใใฎๅ‹•ไฝœใ‚’ๅค‰ๆ›ดใงใใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€่จญๅฎšใซไฟๅญ˜ใ•ใ‚Œใฆใ„ใ‚‹้‡ๅญๅŒ–่จญๅฎšใŒไธŠๆ›ธใใ•ใ‚Œใพใ™ใ€‚ใ‚ซใƒผใƒใƒซใซ้–ข้€ฃใ™ใ‚‹ๅฑžๆ€งใฎใฟใ‚’ไธŠๆ›ธใใงใใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใ•ใ‚‰ใซใ€exllama ใ‚ซใƒผใƒใƒซใ‚’ไฝฟ็”จใ—ใŸใ„ๅ ดๅˆใฏใ€ใƒขใƒ‡ใƒซๅ…จไฝ“ใ‚’ GPU ไธŠใซ็ฝฎใๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ ```py import torch gptq_config = GPTQConfig(bits=4, disable_exllama=False) model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config) ``` ็พๆ™‚็‚นใงใฏ 4 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใฎใฟใŒใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใ•ใ‚‰ใซใ€peft ใ‚’ไฝฟ็”จใ—ใฆ้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใ—ใฆใ„ใ‚‹ๅ ดๅˆใฏใ€exllama ใ‚ซใƒผใƒใƒซใ‚’้žใ‚ขใ‚ฏใƒ†ใ‚ฃใƒ–ๅŒ–ใ™ใ‚‹ใ“ใจใ‚’ใŠๅ‹งใ‚ใ—ใพใ™ใ€‚ #### Fine-tune a quantized model Hugging Face ใ‚จใ‚ณใ‚ทใ‚นใƒ†ใƒ ใฎใ‚ขใƒ€ใƒ—ใ‚ฟใƒผใฎๅ…ฌๅผใ‚ตใƒใƒผใƒˆใซใ‚ˆใ‚Šใ€GPTQ ใง้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใงใใพใ™ใ€‚ ่ฉณ็ดฐใซใคใ„ใฆใฏใ€[`peft`](https://github.com/huggingface/peft) ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚ ### Example demo GPTQ ใ‚’ไฝฟ็”จใ—ใฆใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใ™ใ‚‹ๆ–นๆณ•ใจใ€peft ใ‚’ไฝฟ็”จใ—ใฆ้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใ™ใ‚‹ๆ–นๆณ•ใซใคใ„ใฆใฏใ€Google Colab [ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏ](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ ### GPTQConfig [[autodoc]] GPTQConfig ## `bitsandbytes` Integration ๐Ÿค— Transformers ใฏใ€`bitsandbytes` ใงๆœ€ใ‚‚ใ‚ˆใไฝฟ็”จใ•ใ‚Œใ‚‹ใƒขใ‚ธใƒฅใƒผใƒซใจ็ทŠๅฏ†ใซ็ตฑๅˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ๆ•ฐ่กŒใฎใ‚ณใƒผใƒ‰ใงใƒขใƒ‡ใƒซใ‚’ 8 ใƒ“ใƒƒใƒˆ็ฒพๅบฆใงใƒญใƒผใƒ‰ใงใใพใ™ใ€‚ ใ“ใ‚Œใฏใ€`bitsandbytes`ใฎ `0.37.0`ใƒชใƒชใƒผใ‚นไปฅ้™ใ€ใปใจใ‚“ใฉใฎ GPU 
ใƒใƒผใƒ‰ใ‚ฆใ‚งใ‚ขใงใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ ้‡ๅญๅŒ–ๆ–นๆณ•ใฎ่ฉณ็ดฐใซใคใ„ใฆใฏใ€[LLM.int8()](https://huggingface.co/papers/2208.07339) ่ซ–ๆ–‡ใ€ใพใŸใฏ [ใƒ–ใƒญใ‚ฐๆŠ•็จฟ](https://huggingface.co/blog/hf-bitsandbytes-) ใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚็ตฑๅˆ๏ผ‰ใ‚ณใƒฉใƒœใƒฌใƒผใ‚ทใƒงใƒณใซใคใ„ใฆใ€‚ `0.39.0`ใƒชใƒชใƒผใ‚นไปฅ้™ใ€FP4 ใƒ‡ใƒผใ‚ฟๅž‹ใ‚’ๆดป็”จใ—ใ€4 ใƒ“ใƒƒใƒˆ้‡ๅญๅŒ–ใ‚’ไฝฟ็”จใ—ใฆ`device_map`ใ‚’ใ‚ตใƒใƒผใƒˆใ™ใ‚‹ไปปๆ„ใฎใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใงใใพใ™ใ€‚ ็‹ฌ่‡ชใฎ pytorch ใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใ—ใŸใ„ๅ ดๅˆใฏใ€๐Ÿค— Accelerate ใƒฉใ‚คใƒ–ใƒฉใƒชใฎ [ใƒ‰ใ‚ญใƒฅใƒกใƒณใƒˆ](https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization) ใ‚’ใƒใ‚งใƒƒใ‚ฏใ—ใฆใใ ใ•ใ„ใ€‚ `bitsandbytes`็ตฑๅˆใ‚’ไฝฟ็”จใ—ใฆใงใใ‚‹ใ“ใจใฏๆฌกใฎใจใŠใ‚Šใงใ™ ### General usage ใƒขใƒ‡ใƒซใŒ ๐Ÿค— Accelerate ใซใ‚ˆใ‚‹่ชญใฟ่พผใฟใ‚’ใ‚ตใƒใƒผใƒˆใ—ใ€`torch.nn.Linear` ใƒฌใ‚คใƒคใƒผใŒๅซใพใ‚Œใฆใ„ใ‚‹้™ใ‚Šใ€ [`~PreTrainedModel.from_pretrained`] ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ๅ‘ผใณๅ‡บใ™ใจใใซ `load_in_8bit` ใพใŸใฏ `load_in_4bit` ๅผ•ๆ•ฐใ‚’ไฝฟ็”จใ—ใฆใƒขใƒ‡ใƒซใ‚’้‡ๅญๅŒ–ใงใใพใ™ใ€‚ใ“ใ‚Œใฏใฉใฎใ‚ˆใ†ใชใƒขใƒ€ใƒชใƒ†ใ‚ฃใงใ‚‚ๅŒๆง˜ใซๆฉŸ่ƒฝใ™ใ‚‹ใฏใšใงใ™ใ€‚ ```python from transformers import AutoModelForCausalLM model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True) model_4bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True) ``` ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใฏใ€ไป–ใฎใ™ในใฆใฎใƒขใ‚ธใƒฅใƒผใƒซ (ไพ‹: `torch.nn.LayerNorm`) ใฏ `torch.float16` ใซๅค‰ๆ›ใ•ใ‚Œใพใ™ใŒใ€ใใฎ `dtype` ใ‚’ๅค‰ๆ›ดใ—ใŸใ„ๅ ดๅˆใฏใ€`dtype` ๅผ•ๆ•ฐใ‚’ไธŠๆ›ธใใงใใพใ™ใ€‚ ```python >>> import torch >>> from transformers import AutoModelForCausalLM >>> model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, dtype=torch.float32) >>> model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype torch.float32 ``` ### FP4 quantization #### Requirements ไปฅไธ‹ใฎใ‚ณใƒผใƒ‰ 
ใ‚นใƒ‹ใƒšใƒƒใƒˆใ‚’ๅฎŸ่กŒใ™ใ‚‹ๅ‰ใซใ€ไปฅไธ‹ใฎ่ฆไปถใŒใ‚คใƒณใ‚นใƒˆใƒผใƒซใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ - ๆœ€ๆ–ฐใฎ`bitsandbytes`ใƒฉใ‚คใƒ–ใƒฉใƒช `pip install bitsandbytes>=0.39.0` - ๆœ€ๆ–ฐใฎ`accelerate`ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ `pip install --upgrade accelerate` - ๆœ€ๆ–ฐใฎ `transformers` ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ `pip install --upgrade transformers` #### Tips and best practices - **้ซ˜ๅบฆใชไฝฟ็”จๆณ•:** ๅฏ่ƒฝใชใ™ในใฆใฎใ‚ชใƒ—ใ‚ทใƒงใƒณใ‚’ไฝฟ็”จใ—ใŸ 4 ใƒ“ใƒƒใƒˆ้‡ๅญๅŒ–ใฎ้ซ˜ๅบฆใชไฝฟ็”จๆณ•ใซใคใ„ใฆใฏใ€[ใ“ใฎ Google Colab ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏ](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf) ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ - **`batch_size=1` ใซใ‚ˆใ‚‹้ซ˜้€ŸๆŽจ่ซ– :** bitsandbytes ใฎ `0.40.0` ใƒชใƒชใƒผใ‚นไปฅ้™ใ€`batch_size=1` ใงใฏ้ซ˜้€ŸๆŽจ่ซ–ใฎๆฉๆตใ‚’ๅ—ใ‘ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ [ใ“ใ‚Œใ‚‰ใฎใƒชใƒชใƒผใ‚น ใƒŽใƒผใƒˆ](https://github.com/TimDettmers/bitsandbytes/releases/tag/0.40.0) ใ‚’็ขบ่ชใ—ใ€ใ“ใฎๆฉŸ่ƒฝใ‚’ๆดป็”จใ™ใ‚‹ใซใฏ`0.40.0`ไปฅ้™ใฎใƒใƒผใ‚ธใƒงใƒณใ‚’ไฝฟ็”จใ—ใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚็ฎฑใฎใ€‚ - **ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ:** [QLoRA ่ซ–ๆ–‡](https://huggingface.co/papers/2305.14314) ใซใ‚ˆใ‚‹ใจใ€4 ใƒ“ใƒƒใƒˆๅŸบๆœฌใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ™ใ‚‹ๅ ดๅˆ (ไพ‹: LoRA ใ‚ขใƒ€ใƒ—ใ‚ฟใƒผใ‚’ไฝฟ็”จ)ใ€`bnb_4bit_quant_type='nf4'` ใ‚’ไฝฟ็”จใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ ใ€‚ - **ๆŽจ่ซ–:** ๆŽจ่ซ–ใฎๅ ดๅˆใ€`bnb_4bit_quant_type` ใฏใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใซๅคงใใชๅฝฑ้Ÿฟใ‚’ไธŽใˆใพใ›ใ‚“ใ€‚ใŸใ ใ—ใ€ใƒขใƒ‡ใƒซใฎ้‡ใฟใจใฎไธ€่ฒซๆ€งใ‚’ไฟใคใŸใ‚ใซใ€ๅฟ…ใšๅŒใ˜ `bnb_4bit_compute_dtype` ใŠใ‚ˆใณ `dtype` ๅผ•ๆ•ฐใ‚’ไฝฟ็”จใ—ใฆใใ ใ•ใ„ใ€‚ #### Load a large model in 4bit `.from_pretrained` ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ๅ‘ผใณๅ‡บใ™ใจใใซ `load_in_4bit=True` ใ‚’ไฝฟ็”จใ™ใ‚‹ใจใ€ใƒกใƒขใƒชไฝฟ็”จ้‡ใ‚’ (ใŠใŠใ‚ˆใ) 4 ใงๅ‰ฒใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ ```python # pip install transformers accelerate bitsandbytes from transformers import AutoModelForCausalLM, AutoTokenizer model_id = 
"bigscience/bloom-1b7" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True) ``` <Tip warning={true}> ใƒขใƒ‡ใƒซใŒ 4 ใƒ“ใƒƒใƒˆใงใƒญใƒผใƒ‰ใ•ใ‚Œใ‚‹ใจใ€็พๆ™‚็‚นใงใฏ้‡ๅญๅŒ–ใ•ใ‚ŒใŸ้‡ใฟใ‚’ใƒใƒ–ใซใƒ—ใƒƒใ‚ทใƒฅใ™ใ‚‹ใ“ใจใฏใงใใชใ„ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ 4 ใƒ“ใƒƒใƒˆใฎ้‡ใฟใฏใพใ ใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใชใ„ใŸใ‚ใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใงใใชใ„ใ“ใจใซใ‚‚ๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใŸใ ใ—ใ€4 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ—ใฆ่ฟฝๅŠ ใฎใƒ‘ใƒฉใƒกใƒผใ‚ฟใƒผใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ใ“ใ‚Œใซใคใ„ใฆใฏๆฌกใฎใ‚ปใ‚ฏใ‚ทใƒงใƒณใง่ชฌๆ˜Žใ—ใพใ™ใ€‚ </Tip> ### Load a large model in 8bit `.from_pretrained` ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ๅ‘ผใณๅ‡บใ™ใจใใซ `load_in_8bit=True` ๅผ•ๆ•ฐใ‚’ไฝฟ็”จใ™ใ‚‹ใจใ€ใƒกใƒขใƒช่ฆไปถใ‚’ใŠใ‚ˆใๅŠๅˆ†ใซใ—ใฆใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใงใใพใ™ใ€‚ ```python # pip install transformers accelerate bitsandbytes from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_id = "bigscience/bloom-1b7" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` ๆฌกใซใ€้€šๅธธ [`PreTrainedModel`] ใ‚’ไฝฟ็”จใ™ใ‚‹ใฎใจๅŒใ˜ใ‚ˆใ†ใซใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ `get_memory_footprint` ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใฆใ€ใƒขใƒ‡ใƒซใฎใƒกใƒขใƒช ใƒ•ใƒƒใƒˆใƒ—ใƒชใƒณใƒˆใ‚’็ขบ่ชใงใใพใ™ใ€‚ ```python print(model.get_memory_footprint()) ``` ใ“ใฎ็ตฑๅˆใซใ‚ˆใ‚Šใ€ๅคงใใชใƒขใƒ‡ใƒซใ‚’ๅฐใ•ใชใƒ‡ใƒใ‚คใ‚นใซใƒญใƒผใƒ‰ใ—ใ€ๅ•้กŒใชใๅฎŸ่กŒใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ—ใŸใ€‚ <Tip warning={true}> ใƒขใƒ‡ใƒซใŒ 8 ใƒ“ใƒƒใƒˆใงใƒญใƒผใƒ‰ใ•ใ‚Œใ‚‹ใจใ€ๆœ€ๆ–ฐใฎ `transformers`ใจ`bitsandbytes`ใ‚’ไฝฟ็”จใ™ใ‚‹ๅ ดๅˆใ‚’้™คใใ€้‡ๅญๅŒ–ใ•ใ‚ŒใŸ้‡ใฟใ‚’ใƒใƒ–ใซใƒ—ใƒƒใ‚ทใƒฅใ™ใ‚‹ใ“ใจใฏ็พๅœจไธๅฏ่ƒฝใงใ‚ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ 8 ใƒ“ใƒƒใƒˆใฎ้‡ใฟใฏใพใ 
ใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใชใ„ใŸใ‚ใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใงใใชใ„ใ“ใจใซใ‚‚ๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใŸใ ใ—ใ€8 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ—ใฆ่ฟฝๅŠ ใฎใƒ‘ใƒฉใƒกใƒผใ‚ฟใƒผใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ใ“ใ‚Œใซใคใ„ใฆใฏๆฌกใฎใ‚ปใ‚ฏใ‚ทใƒงใƒณใง่ชฌๆ˜Žใ—ใพใ™ใ€‚ ใพใŸใ€`device_map` ใฏใ‚ชใƒ—ใ‚ทใƒงใƒณใงใ™ใŒใ€ๅˆฉ็”จๅฏ่ƒฝใชใƒชใ‚ฝใƒผใ‚นไธŠใงใƒขใƒ‡ใƒซใ‚’ๅŠน็އ็š„ใซใƒ‡ใ‚ฃใ‚นใƒ‘ใƒƒใƒใ™ใ‚‹ใŸใ‚ใ€ๆŽจ่ซ–ใซใฏ `device_map = 'auto'` ใ‚’่จญๅฎšใ™ใ‚‹ใ“ใจใŒๆŽจๅฅจใ•ใ‚Œใพใ™ใ€‚ </Tip> #### Advanced use cases ใ“ใ“ใงใฏใ€FP4 ้‡ๅญๅŒ–ใ‚’ไฝฟ็”จใ—ใฆๅฎŸ่กŒใงใใ‚‹ใ„ใใคใ‹ใฎ้ซ˜ๅบฆใชไฝฟ็”จไพ‹ใซใคใ„ใฆ่ชฌๆ˜Žใ—ใพใ™ใ€‚ ##### Change the compute dtype compute dtype ใฏใ€่จˆ็ฎ—ไธญใซไฝฟ็”จใ•ใ‚Œใ‚‹ dtype ใ‚’ๅค‰ๆ›ดใ™ใ‚‹ใŸใ‚ใซไฝฟ็”จใ•ใ‚Œใพใ™ใ€‚ใŸใจใˆใฐใ€้š ใ—็Šถๆ…‹ใฏ`float32`ใซใ‚ใ‚Šใพใ™ใŒใ€้ซ˜้€ŸๅŒ–ใฎใŸใ‚ใซ่จˆ็ฎ—ใ‚’ bf16 ใซ่จญๅฎšใงใใพใ™ใ€‚ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใฏใ€compute dtype ใฏ `float32` ใซ่จญๅฎšใ•ใ‚Œใพใ™ใ€‚ ```python import torch from transformers import BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16) ``` ##### Using NF4 (Normal Float 4) data type NF4 ใƒ‡ใƒผใ‚ฟๅž‹ใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ใ“ใ‚Œใฏใ€ๆญฃ่ฆๅˆ†ๅธƒใ‚’ไฝฟ็”จใ—ใฆๅˆๆœŸๅŒ–ใ•ใ‚ŒใŸ้‡ใฟใซ้ฉๅˆใ—ใŸๆ–ฐใ—ใ„ 4 ใƒ“ใƒƒใƒˆ ใƒ‡ใƒผใ‚ฟๅž‹ใงใ™ใ€‚ใใฎๅฎŸ่กŒใฎใŸใ‚ใซ: ```python from transformers import BitsAndBytesConfig nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", ) model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config) ``` ##### Use nested quantization for more memory efficient inference ใพใŸใ€ใƒใ‚นใƒˆใ•ใ‚ŒใŸ้‡ๅญๅŒ–ๆ‰‹ๆณ•ใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใ‚’ใŠๅ‹งใ‚ใ—ใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใ‚’่ฟฝๅŠ ใ™ใ‚‹ใ“ใจใชใใ€ใ‚ˆใ‚ŠๅคšใใฎใƒกใƒขใƒชใŒ็ฏ€็ด„ใ•ใ‚Œใพใ™ใ€‚็ตŒ้จ“็š„ใช่ฆณๅฏŸใ‹ใ‚‰ใ€ใ“ใ‚Œใซใ‚ˆใ‚Šใ€NVIDIA-T4 16GB ไธŠใงใ‚ทใƒผใ‚ฑใƒณใ‚น้•ท 
1024ใ€ใƒใƒƒใƒ ใ‚ตใ‚คใ‚บ 1ใ€ๅ‹พ้…็ดฏ็ฉใ‚นใƒ†ใƒƒใƒ— 4 ใฎ llama-13b ใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใ™ใ‚‹ใ“ใจใŒๅฏ่ƒฝใซใชใ‚Šใพใ™ใ€‚ ```python from transformers import BitsAndBytesConfig double_quant_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config) ``` ### Push quantized models on the ๐Ÿค— Hub `push_to_hub`ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ๅ˜็ด”ใซไฝฟ็”จใ™ใ‚‹ใ“ใจใงใ€้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ใƒใƒ–ใซใƒ—ใƒƒใ‚ทใƒฅใงใใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ๆœ€ๅˆใซ้‡ๅญๅŒ–ๆง‹ๆˆใƒ•ใ‚กใ‚คใƒซใŒใƒ—ใƒƒใ‚ทใƒฅใ•ใ‚Œใ€ๆฌกใซ้‡ๅญๅŒ–ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใฎ้‡ใฟใŒใƒ—ใƒƒใ‚ทใƒฅใ•ใ‚Œใพใ™ใ€‚ ใ“ใฎๆฉŸ่ƒฝใ‚’ไฝฟ็”จใงใใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ใซใฏใ€ๅฟ…ใš `bitsandbytes>0.37.2` ใ‚’ไฝฟ็”จใ—ใฆใใ ใ•ใ„ (ใ“ใฎ่จ˜ไบ‹ใฎๅŸท็ญ†ๆ™‚็‚นใงใฏใ€`bitsandbytes==0.38.0.post1` ใงใƒ†ใ‚นใƒˆใ—ใพใ—ใŸ)ใ€‚ ```python from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", quantization_config=BitsAndBytesConfig(load_in_8bit=True)) tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m") model.push_to_hub("bloom-560m-8bit") ``` <Tip warning={true}> ๅคง่ฆๆจกใชใƒขใƒ‡ใƒซใงใฏใ€ใƒใƒ–ไธŠใง 8 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใ‚’ใƒ—ใƒƒใ‚ทใƒฅใ™ใ‚‹ใ“ใจใŒๅผทใๆŽจๅฅจใ•ใ‚Œใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃใฏใƒกใƒขใƒช ใƒ•ใƒƒใƒˆใƒ—ใƒชใƒณใƒˆใฎๅ‰Šๆธ›ใจใ€ใŸใจใˆใฐ Google Colab ใงใฎๅคง่ฆๆจกใชใƒขใƒ‡ใƒซใฎ่ชญใฟ่พผใฟใซใ‚ˆใ‚‹ๆฉๆตใ‚’ๅ—ใ‘ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ </Tip> ### Load a quantized model from the ๐Ÿค— Hub `from_pretrained`ใƒกใ‚ฝใƒƒใƒ‰ใ‚’ไฝฟ็”จใ—ใฆใ€ใƒใƒ–ใ‹ใ‚‰้‡ๅญๅŒ–ใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใงใใพใ™ใ€‚ๅฑžๆ€ง `quantization_config` ใŒใƒขใƒ‡ใƒซ่จญๅฎšใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใซๅญ˜ๅœจใ™ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใ€ใƒ—ใƒƒใ‚ทใƒฅใ•ใ‚ŒใŸ้‡ใฟใŒ้‡ๅญๅŒ–ใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM, 
AutoTokenizer model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto") ``` ใ“ใฎๅ ดๅˆใ€ๅผ•ๆ•ฐ `load_in_8bit=True` ใ‚’ๆŒ‡ๅฎšใ™ใ‚‹ๅฟ…่ฆใฏใ‚ใ‚Šใพใ›ใ‚“ใŒใ€`bitsandbytes` ใจ `accelerate` ใŒใ‚คใƒณใ‚นใƒˆใƒผใƒซใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ ใพใŸใ€`device_map` ใฏใ‚ชใƒ—ใ‚ทใƒงใƒณใงใ™ใŒใ€ๅˆฉ็”จๅฏ่ƒฝใชใƒชใ‚ฝใƒผใ‚นไธŠใงใƒขใƒ‡ใƒซใ‚’ๅŠน็އ็š„ใซใƒ‡ใ‚ฃใ‚นใƒ‘ใƒƒใƒใ™ใ‚‹ใŸใ‚ใ€ๆŽจ่ซ–ใซใฏ `device_map = 'auto'` ใ‚’่จญๅฎšใ™ใ‚‹ใ“ใจใŒๆŽจๅฅจใ•ใ‚Œใพใ™ใ€‚ ### Advanced use cases ใ“ใฎใ‚ปใ‚ฏใ‚ทใƒงใƒณใฏใ€8 ใƒ“ใƒƒใƒˆ ใƒขใƒ‡ใƒซใฎใƒญใƒผใƒ‰ใจๅฎŸ่กŒไปฅๅค–ใซไฝ•ใŒใงใใ‚‹ใ‹ใ‚’ๆŽขๆฑ‚ใ—ใŸใ„ไธŠ็ดšใƒฆใƒผใ‚ถใƒผใ‚’ๅฏพ่ฑกใจใ—ใฆใ„ใพใ™ใ€‚ #### Offload between `cpu` and `gpu` ใ“ใฎ้ซ˜ๅบฆใชไฝฟ็”จไพ‹ใฎ 1 ใคใฏใ€ใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ—ใ€`CPU`ใจ`GPU`ใฎ้–“ใง้‡ใฟใ‚’ใƒ‡ใ‚ฃใ‚นใƒ‘ใƒƒใƒใงใใ‚‹ใ“ใจใงใ™ใ€‚ CPU ไธŠใงใƒ‡ใ‚ฃใ‚นใƒ‘ใƒƒใƒใ•ใ‚Œใ‚‹้‡ใฟใฏ **8 ใƒ“ใƒƒใƒˆใซๅค‰ๆ›ใ•ใ‚Œใชใ„**ใŸใ‚ใ€`float32`ใซไฟๆŒใ•ใ‚Œใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใ“ใฎๆฉŸ่ƒฝใฏใ€้žๅธธใซๅคง่ฆๆจกใชใƒขใƒ‡ใƒซใ‚’้ฉๅˆใ•ใ›ใ€ใใฎใƒขใƒ‡ใƒซใ‚’ GPU ใจ CPU ใฎ้–“ใงใƒ‡ใ‚ฃใ‚นใƒ‘ใƒƒใƒใ—ใŸใ„ใƒฆใƒผใ‚ถใƒผใ‚’ๅฏพ่ฑกใจใ—ใฆใ„ใพใ™ใ€‚ ใพใšใ€`transformers` ใ‹ใ‚‰ [`BitsAndBytesConfig`] ใ‚’ใƒญใƒผใƒ‰ใ—ใ€ๅฑžๆ€ง `llm_int8_enable_fp32_cpu_offload` ใ‚’ `True` ใซ่จญๅฎšใ—ใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True) ``` `bigscience/bloom-1b7`ใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใ€`lm_head`ใ‚’้™คใใƒขใƒ‡ใƒซๅ…จไฝ“ใซโ€‹โ€‹้ฉๅˆใ™ใ‚‹ใฎใซๅๅˆ†ใช GPU RAM ใŒใ‚ใ‚‹ใจใ—ใพใ™ใ€‚ใ—ใŸใŒใฃใฆใ€ๆฌกใฎใ‚ˆใ†ใซใ‚ซใ‚นใ‚ฟใƒ  device_map ใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ ```python device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 0, } 
``` ใใ—ใฆใ€ๆฌกใฎใ‚ˆใ†ใซใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ—ใพใ™ใ€‚ ```python model_8bit = AutoModelForCausalLM.from_pretrained( "bigscience/bloom-1b7", device_map=device_map, quantization_config=quantization_config, ) ``` ไปฅไธŠใงใ™๏ผใƒขใƒ‡ใƒซใ‚’ๆฅฝใ—ใ‚“ใงใใ ใ•ใ„๏ผ #### Play with `llm_int8_threshold` `llm_int8_threshold` ๅผ•ๆ•ฐใ‚’ๆ“ไฝœใ—ใฆใ€ๅค–ใ‚Œๅ€คใฎใ—ใใ„ๅ€คใ‚’ๅค‰ๆ›ดใงใใพใ™ใ€‚ ๅค–ใ‚Œๅ€ค ใจใฏใ€็‰นๅฎšใฎใ—ใใ„ๅ€คใ‚ˆใ‚Šๅคงใใ„้š ใ‚ŒใŸ็Šถๆ…‹ใฎๅ€คใงใ™ใ€‚ ใ“ใ‚Œใฏใ€`LLM.int8()`่ซ–ๆ–‡ใง่ชฌๆ˜Žใ•ใ‚Œใฆใ„ใ‚‹ๅค–ใ‚Œๅ€คๆคœๅ‡บใฎๅค–ใ‚Œๅ€คใ—ใใ„ๅ€คใซๅฏพๅฟœใ—ใพใ™ใ€‚ใ“ใฎใ—ใใ„ๅ€คใ‚’่ถ…ใˆใ‚‹้š ใ—็Šถๆ…‹ใฎๅ€คใฏๅค–ใ‚Œๅ€คใจใฟใชใ•ใ‚Œใ€ใใ‚Œใ‚‰ใฎๅ€คใซๅฏพใ™ใ‚‹ๆ“ไฝœใฏ fp16 ใงๅฎŸ่กŒใ•ใ‚Œใพใ™ใ€‚้€šๅธธใ€ๅ€คใฏๆญฃ่ฆๅˆ†ๅธƒใ—ใพใ™ใ€‚ใคใพใ‚Šใ€ใปใจใ‚“ใฉใฎๅ€คใฏ [-3.5, 3.5] ใฎ็ฏ„ๅ›ฒๅ†…ใซใ‚ใ‚Šใพใ™ใŒใ€ๅคง่ฆๆจกใชใƒขใƒ‡ใƒซใงใฏๅคงใใ็•ฐใชใ‚‹ๅˆ†ๅธƒใ‚’็คบใ™ไพ‹ๅค–็š„ใช็ณป็ตฑ็š„ๅค–ใ‚Œๅ€คใŒใ„ใใคใ‹ใ‚ใ‚Šใพใ™ใ€‚ใ“ใ‚Œใ‚‰ใฎๅค–ใ‚Œๅ€คใฏใ€ๅคšใใฎๅ ดๅˆ [-60, -6] ใพใŸใฏ [6, 60] ใฎ็ฏ„ๅ›ฒๅ†…ใซใ‚ใ‚Šใพใ™ใ€‚ Int8 ้‡ๅญๅŒ–ใฏใ€ๅคงใใ•ใŒ 5 ็จ‹ๅบฆใพใงใฎๅ€คใงใฏใ†ใพใๆฉŸ่ƒฝใ—ใพใ™ใŒใ€ใใ‚Œใ‚’่ถ…ใˆใ‚‹ใจใ€ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใŒๅคงๅน…ใซไฝŽไธ‹ใ—ใพใ™ใ€‚้ฉๅˆ‡ใชใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎใ—ใใ„ๅ€คใฏ 6 ใงใ™ใŒใ€ใ‚ˆใ‚Šไธๅฎ‰ๅฎšใชใƒขใƒ‡ใƒซ (ๅฐ่ฆๆจกใชใƒขใƒ‡ใƒซใ€ๅพฎ่ชฟๆ•ด) ใงใฏใ€ใ‚ˆใ‚ŠไฝŽใ„ใ—ใใ„ๅ€คใŒๅฟ…่ฆใซใชใ‚‹ๅ ดๅˆใŒใ‚ใ‚Šใพใ™ใ€‚ ใ“ใฎๅผ•ๆ•ฐใฏใ€ใƒขใƒ‡ใƒซใฎๆŽจ่ซ–้€Ÿๅบฆใซๅฝฑ้Ÿฟใ‚’ไธŽใˆใ‚‹ๅฏ่ƒฝๆ€งใŒใ‚ใ‚Šใพใ™ใ€‚ใ“ใฎใƒ‘ใƒฉใƒกใƒผใ‚ฟใ‚’่ฉฆใ—ใฆใฟใฆใ€ใƒฆใƒผใ‚นใ‚ฑใƒผใ‚นใซๆœ€้ฉใชใƒ‘ใƒฉใƒกใƒผใ‚ฟใ‚’่ฆ‹ใคใ‘ใ‚‹ใ“ใจใ‚’ใŠๅ‹งใ‚ใ—ใพใ™ใ€‚ ```python from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_id = "bigscience/bloom-1b7" quantization_config = BitsAndBytesConfig( llm_int8_threshold=10, ) model_8bit = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, quantization_config=quantization_config, ) 
tokenizer = AutoTokenizer.from_pretrained(model_id) ``` #### Skip the conversion of some modules ไธ€้ƒจใฎใƒขใƒ‡ใƒซใซใฏใ€ๅฎ‰ๅฎšๆ€งใ‚’็ขบไฟใ™ใ‚‹ใŸใ‚ใซ 8 ใƒ“ใƒƒใƒˆใซๅค‰ๆ›ใ™ใ‚‹ๅฟ…่ฆใŒใชใ„ใƒขใ‚ธใƒฅใƒผใƒซใŒใ„ใใคใ‹ใ‚ใ‚Šใพใ™ใ€‚ใŸใจใˆใฐใ€ใ‚ธใƒฅใƒผใ‚ฏใƒœใƒƒใ‚ฏใ‚น ใƒขใƒ‡ใƒซใซใฏใ€ใ‚นใ‚ญใƒƒใƒ—ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚‹ใ„ใใคใ‹ใฎ `lm_head` ใƒขใ‚ธใƒฅใƒผใƒซใŒใ‚ใ‚Šใพใ™ใ€‚ `llm_int8_skip_modules` ใง้Šใ‚“ใงใฟใ‚‹ ```python from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_id = "bigscience/bloom-1b7" quantization_config = BitsAndBytesConfig( llm_int8_skip_modules=["lm_head"], ) model_8bit = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, quantization_config=quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(model_id) ``` #### Fine-tune a model that has been loaded in 8-bit Hugging Face ใ‚จใ‚ณใ‚ทใ‚นใƒ†ใƒ ใฎใ‚ขใƒ€ใƒ—ใ‚ฟใƒผใฎๅ…ฌๅผใ‚ตใƒใƒผใƒˆใซใ‚ˆใ‚Šใ€8 ใƒ“ใƒƒใƒˆใงใƒญใƒผใƒ‰ใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใงใใพใ™ใ€‚ ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ๅ˜ไธ€ใฎ Google Colab ใง`flan-t5-large`ใ‚„`facebook/opt-6.7b`ใชใฉใฎๅคง่ฆๆจกใƒขใƒ‡ใƒซใ‚’ๅพฎ่ชฟๆ•ดใ™ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚่ฉณ็ดฐใซใคใ„ใฆใฏใ€[`peft`](https://github.com/huggingface/peft) ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚ ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ็”จใฎใƒขใƒ‡ใƒซใ‚’ใƒญใƒผใƒ‰ใ™ใ‚‹ใจใใซ `device_map` ใ‚’ๆธกใ™ๅฟ…่ฆใŒใชใ„ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ใƒขใƒ‡ใƒซใŒ GPU ใซ่‡ชๅ‹•็š„ใซใƒญใƒผใƒ‰ใ•ใ‚Œใพใ™ใ€‚ๅฟ…่ฆใซๅฟœใ˜ใฆใ€ใƒ‡ใƒใ‚คใ‚น ใƒžใƒƒใƒ—ใ‚’็‰นๅฎšใฎใƒ‡ใƒใ‚คใ‚นใซ่จญๅฎšใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ (ไพ‹: `cuda:0`ใ€`0`ใ€`torch.device('cuda:0')`)ใ€‚ `device_map=auto`ใฏๆŽจ่ซ–ใฎใฟใซไฝฟ็”จใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ ### BitsAndBytesConfig [[autodoc]] BitsAndBytesConfig ## Quantization with ๐Ÿค— `optimum` `optimum`ใงใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใ‚‹้‡ๅญๅŒ–ๆ–นๆณ•ใฎ่ฉณ็ดฐใซใคใ„ใฆใฏใ€[Optimum ใƒ‰ใ‚ญใƒฅใƒกใƒณใƒˆ](https://huggingface.co/docs/optimum/index) 
ใ‚’ๅ‚็…งใ—ใ€ใ“ใ‚Œใ‚‰ใŒ่‡ชๅˆ†ใฎใƒฆใƒผใ‚นใ‚ฑใƒผใ‚นใซ้ฉ็”จใงใใ‚‹ใ‹ใฉใ†ใ‹ใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚
transformers/docs/source/ja/main_classes/quantization.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/quantization.md", "repo_id": "transformers", "token_count": 10644 }
422
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BertJapanese ## Overview BERT ใƒขใƒ‡ใƒซใฏๆ—ฅๆœฌ่ชžใƒ†ใ‚ญใ‚นใƒˆใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚Œใพใ—ใŸใ€‚ 2 ใคใฎ็•ฐใชใ‚‹ใƒˆใƒผใ‚ฏใƒณๅŒ–ๆ–นๆณ•ใ‚’ๅ‚™ใˆใŸใƒขใƒ‡ใƒซใŒใ‚ใ‚Šใพใ™ใ€‚ - MeCab ใจ WordPiece ใ‚’ไฝฟ็”จใ—ใฆใƒˆใƒผใ‚ฏใƒณๅŒ–ใ—ใพใ™ใ€‚ใ“ใ‚Œใซใฏใ€[MeCab](https://taku910.github.io/mecab/) ใฎใƒฉใƒƒใƒ‘ใƒผใงใ‚ใ‚‹ [fugashi](https://github.com/polm/fugashi) ใจใ„ใ†่ฟฝๅŠ ใฎไพๅญ˜้–ขไฟ‚ใŒๅฟ…่ฆใงใ™ใ€‚ - ๆ–‡ๅญ—ใซใƒˆใƒผใ‚ฏใƒณๅŒ–ใ—ใพใ™ใ€‚ *MecabTokenizer* ใ‚’ไฝฟ็”จใ™ใ‚‹ใซใฏใ€`pip install transformers["ja"]` (ใพใŸใฏใ€ใ‚ฝใƒผใ‚นใ‹ใ‚‰ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ๅ ดๅˆใฏ `pip install -e .["ja"]`) ใ‚’ๅฎŸ่กŒใ—ใฆไพๅญ˜้–ขไฟ‚ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ [cl-tohokuใƒชใƒใ‚ธใƒˆใƒชใฎ่ฉณ็ดฐ](https://github.com/cl-tohoku/bert-japanese)ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ MeCab ใŠใ‚ˆใณ WordPiece ใƒˆใƒผใ‚ฏใƒณๅŒ–ใงใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ไพ‹: ```python >>> import torch >>> from transformers import AutoModel, AutoTokenizer >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese") >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese") >>> ## Input Japanese Text >>> line = "ๅพ่ผฉใฏ็Œซใงใ‚ใ‚‹ใ€‚" >>> inputs = tokenizer(line, return_tensors="pt") >>> 
print(tokenizer.decode(inputs["input_ids"][0])) [CLS] ๅพ่ผฉ ใฏ ็Œซ ใง ใ‚ใ‚‹ ใ€‚ [SEP] >>> outputs = bertjapanese(**inputs) ``` ๆ–‡ๅญ—ใƒˆใƒผใ‚ฏใƒณๅŒ–ใ‚’ไฝฟ็”จใ—ใŸใƒขใƒ‡ใƒซใฎไฝฟ็”จไพ‹: ```python >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char") >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char") >>> ## Input Japanese Text >>> line = "ๅพ่ผฉใฏ็Œซใงใ‚ใ‚‹ใ€‚" >>> inputs = tokenizer(line, return_tensors="pt") >>> print(tokenizer.decode(inputs["input_ids"][0])) [CLS] ๅพ ่ผฉ ใฏ ็Œซ ใง ใ‚ ใ‚‹ ใ€‚ [SEP] >>> outputs = bertjapanese(**inputs) ``` <Tip> - ใ“ใฎๅฎŸ่ฃ…ใฏใƒˆใƒผใ‚ฏใƒณๅŒ–ๆ–นๆณ•ใ‚’้™คใ„ใฆ BERT ใจๅŒใ˜ใงใ™ใ€‚ใใฎไป–ใฎไฝฟ็”จไพ‹ใซใคใ„ใฆใฏใ€[BERT ใฎใƒ‰ใ‚ญใƒฅใƒกใƒณใƒˆ](bert) ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ </Tip> ใ“ใฎใƒขใƒ‡ใƒซใฏ[cl-tohoku](https://huggingface.co/cl-tohoku)ใ‹ใ‚‰ๆไพ›ใ•ใ‚Œใพใ—ใŸใ€‚ ## BertJapaneseTokenizer [[autodoc]] BertJapaneseTokenizer
transformers/docs/source/ja/model_doc/bert-japanese.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bert-japanese.md", "repo_id": "transformers", "token_count": 1114 }
423
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CamemBERT ## Overview CamemBERT ใƒขใƒ‡ใƒซใฏใ€[CamemBERT: a Tasty French Language Model](https://huggingface.co/papers/1911.03894) ใงๆๆกˆใ•ใ‚Œใพใ—ใŸใ€‚ Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suรกrez, Yoann Dupont, Laurent Romary, ร‰ric Villemonte de la Clergerie, Djamรฉ Seddah, and Benoรฎt Sagot. 
2019ๅนดใซใƒชใƒชใƒผใ‚นใ•ใ‚ŒใŸFacebookใฎRoBERTaใƒขใƒ‡ใƒซใ‚’ใƒ™ใƒผใ‚นใซใ—ใŸใƒขใƒ‡ใƒซใงใ™ใ€‚ 138GBใฎใƒ•ใƒฉใƒณใ‚น่ชžใƒ†ใ‚ญใ‚นใƒˆใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚Œใพใ—ใŸใ€‚ ่ซ–ๆ–‡ใฎ่ฆ็ด„ใฏๆฌกใฎใจใŠใ‚Šใงใ™ใ€‚ *ไบ‹ๅ‰ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚ŒใŸ่จ€่ชžใƒขใƒ‡ใƒซใฏ็พๅœจใ€่‡ช็„ถ่จ€่ชžๅ‡ฆ็†ใงๅบƒใๆ™ฎๅŠใ—ใฆใ„ใพใ™ใ€‚ๆˆๅŠŸใซใ‚‚ใ‹ใ‹ใ‚ใ‚‰ใšใ€ๅˆฉ็”จๅฏ่ƒฝใชใปใจใ‚“ใฉใฎ ใƒขใƒ‡ใƒซใฏ่‹ฑ่ชžใฎใƒ‡ใƒผใ‚ฟใ€ใพใŸใฏ่ค‡ๆ•ฐ่จ€่ชžใฎใƒ‡ใƒผใ‚ฟใฎ้€ฃ็ตใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚Œใฆใ„ใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ ใ“ใฎใ‚ˆใ†ใชใƒขใƒ‡ใƒซใฎๅฎŸ้š›ใฎไฝฟ็”จใฏใ€่‹ฑ่ชžใ‚’้™คใใ™ในใฆใฎ่จ€่ชžใง้žๅธธใซ้™ใ‚‰ใ‚Œใฆใ„ใพใ™ใ€‚ใƒ•ใƒฉใƒณใ‚นไบบใซใจใฃใฆใ“ใฎๅ•้กŒใซๅฏพๅ‡ฆใ™ใ‚‹ใ“ใจใ‚’็›ฎๆŒ‡ใ—ใฆใ€ Bi-direction Encoders for Transformers (BERT) ใฎใƒ•ใƒฉใƒณใ‚น่ชž็‰ˆใงใ‚ใ‚‹ CamemBERT ใ‚’ใƒชใƒชใƒผใ‚นใ—ใพใ™ใ€‚ๆธฌๅฎšใ—ใพใ™ ่ค‡ๆ•ฐใฎไธ‹ๆตใ‚ฟใ‚นใ‚ฏใ€ใคใพใ‚Šๅ“่ฉžใ‚ฟใ‚ฐไป˜ใ‘ใซใŠใ‘ใ‚‹ๅคš่จ€่ชžใƒขใƒ‡ใƒซใจๆฏ”่ผƒใ—ใŸ CamemBERT ใฎใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚น ไพๅญ˜้–ขไฟ‚่งฃๆžใ€ๅ›บๆœ‰่กจ็พ่ช่ญ˜ใ€่‡ช็„ถ่จ€่ชžๆŽจ่ซ–ใ€‚ CamemBERT ใฏๆœ€ๅ…ˆ็ซฏๆŠ€่ก“ใ‚’ๅ‘ไธŠใ•ใ›ใพใ™ ๆคœ่จŽใ•ใ‚Œใฆใ„ใ‚‹ใปใจใ‚“ใฉใฎใ‚ฟใ‚นใ‚ฏใซๅฏพๅฟœใ—ใพใ™ใ€‚็งใŸใกใฏใ€็ ”็ฉถใจ ใƒ•ใƒฉใƒณใ‚น่ชž NLP ใฎไธ‹ๆตใ‚ขใƒ—ใƒชใ‚ฑใƒผใ‚ทใƒงใƒณใ€‚* ใ“ใฎใƒขใƒ‡ใƒซใฏ [camembert](https://huggingface.co/camembert) ใซใ‚ˆใฃใฆๆไพ›ใ•ใ‚Œใพใ—ใŸใ€‚ๅ…ƒใฎใ‚ณใƒผใƒ‰ใฏ [ใ“ใ“](https://camembert-model.fr/) ใซใ‚ใ‚Šใพใ™ใ€‚ <Tip> ใ“ใฎๅฎŸ่ฃ…ใฏRoBERTaใจๅŒใ˜ใงใ™ใ€‚ไฝฟ็”จไพ‹ใซใคใ„ใฆใฏ[RoBERTaใฎใƒ‰ใ‚ญใƒฅใƒกใƒณใƒˆ](roberta)ใ‚‚ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ ๅ…ฅๅŠ›ใจๅ‡บๅŠ›ใซ้–ขใ™ใ‚‹ๆƒ…ๅ ฑใจใ—ใฆใ€‚ </Tip> ## Resources - [ใƒ†ใ‚ญใ‚นใƒˆๅˆ†้กžใ‚ฟใ‚นใ‚ฏใ‚ฌใ‚คใƒ‰(่‹ฑ่ชž็‰ˆ)](../../en/tasks/sequence_classification) - [ใƒˆใƒผใ‚ฏใƒณๅˆ†้กžใ‚ฟใ‚นใ‚ฏใ‚ฌใ‚คใƒ‰](../tasks/token_classification) - [่ณชๅ•ๅ›ž็ญ”ใ‚ฟใ‚นใ‚ฏ ใ‚ฌใ‚คใƒ‰](../tasks/question_answering) - [ๅ› ๆžœ่จ€่ชžใƒขใƒ‡ใƒชใƒณใ‚ฐ ใ‚ฟใ‚นใ‚ฏ ใ‚ฌใ‚คใƒ‰](../tasks/language_modeling) - [ใƒžใ‚นใ‚ฏ่จ€่ชžใƒขใƒ‡ใƒชใƒณใ‚ฐ ใ‚ฟใ‚นใ‚ฏ 
ใ‚ฌใ‚คใƒ‰](../tasks/masked_language_modeling) - [ๅคš่‚ข้ธๆŠžใ‚ฟใ‚นใ‚ฏ ใ‚ฌใ‚คใƒ‰](../tasks/multiple_choice) ## CamembertConfig [[autodoc]] CamembertConfig ## CamembertTokenizer [[autodoc]] CamembertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## CamembertTokenizerFast [[autodoc]] CamembertTokenizerFast <frameworkcontent> <pt> ## CamembertModel [[autodoc]] CamembertModel ## CamembertForCausalLM [[autodoc]] CamembertForCausalLM ## CamembertForMaskedLM [[autodoc]] CamembertForMaskedLM ## CamembertForSequenceClassification [[autodoc]] CamembertForSequenceClassification ## CamembertForMultipleChoice [[autodoc]] CamembertForMultipleChoice ## CamembertForTokenClassification [[autodoc]] CamembertForTokenClassification ## CamembertForQuestionAnswering [[autodoc]] CamembertForQuestionAnswering </pt> <tf> ## TFCamembertModel [[autodoc]] TFCamembertModel ## TFCamembertForCausalLM [[autodoc]] TFCamembertForCausalLM ## TFCamembertForMaskedLM [[autodoc]] TFCamembertForMaskedLM ## TFCamembertForSequenceClassification [[autodoc]] TFCamembertForSequenceClassification ## TFCamembertForMultipleChoice [[autodoc]] TFCamembertForMultipleChoice ## TFCamembertForTokenClassification [[autodoc]] TFCamembertForTokenClassification ## TFCamembertForQuestionAnswering [[autodoc]] TFCamembertForQuestionAnswering </tf> </frameworkcontent>
transformers/docs/source/ja/model_doc/camembert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/camembert.md", "repo_id": "transformers", "token_count": 1755 }
424
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Convolutional Vision Transformer (CvT) ## Overview CvT ใƒขใƒ‡ใƒซใฏใ€Haping Wuใ€Bin Xiaoใ€Noel Codellaใ€Mengchen Liuใ€Xiyang Daiใ€Lu Yuanใ€Lei Zhang ใซใ‚ˆใฃใฆ [CvT: Introduction Convolutions to Vision Transformers](https://huggingface.co/papers/2103.15808) ใงๆๆกˆใ•ใ‚Œใพใ—ใŸใ€‚็•ณใฟ่พผใฟใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ (CvT) ใฏใ€ViT ใซ็•ณใฟ่พผใฟใ‚’ๅฐŽๅ…ฅใ—ใฆไธกๆ–นใฎ่จญ่จˆใฎ้•ทๆ‰€ใ‚’ๅผ•ใๅ‡บใ™ใ“ใจใซใ‚ˆใ‚Šใ€[ใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ (ViT)](vit) ใฎใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใจๅŠน็އใ‚’ๅ‘ไธŠใ•ใ›ใพใ™ใ€‚ ่ซ–ๆ–‡ใฎ่ฆ็ด„ใฏๆฌกใฎใจใŠใ‚Šใงใ™ใ€‚ *ใ“ใฎ่ซ–ๆ–‡ใงใฏใ€ใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ (ViT) ใ‚’ๆ”นๅ–„ใ™ใ‚‹ใ€็•ณใฟ่พผใฟใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ (CvT) ใจๅ‘ผใฐใ‚Œใ‚‹ๆ–ฐใ—ใ„ใ‚ขใƒผใ‚ญใƒ†ใ‚ฏใƒใƒฃใ‚’็ดนไป‹ใ—ใพใ™ใ€‚ ViT ใซ็•ณใฟ่พผใฟใ‚’ๅฐŽๅ…ฅใ—ใฆไธกๆ–นใฎ่จญ่จˆใฎ้•ทๆ‰€ใ‚’ๅผ•ใๅ‡บใ™ใ“ใจใงใ€ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใจๅŠน็އใ‚’ๅ‘ไธŠใ•ใ›ใพใ™ใ€‚ใ“ใ‚Œใฏๆฌกใฎใ‚ˆใ†ใซใ—ใฆๅฎŸ็พใ•ใ‚Œใพใ™ใ€‚ 2 ใคใฎไธป่ฆใชๅค‰ๆ›ด: ๆ–ฐใ—ใ„็•ณใฟ่พผใฟใƒˆใƒผใ‚ฏใƒณใฎๅŸ‹ใ‚่พผใฟใ‚’ๅซใ‚€ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผใฎ้šŽๅฑคใจใ€็•ณใฟ่พผใฟใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผ 
็•ณใฟ่พผใฟๅฐ„ๅฝฑใ‚’ๅˆฉ็”จใ—ใŸใƒ–ใƒญใƒƒใ‚ฏใ€‚ใ“ใ‚Œใ‚‰ใฎๅค‰ๆ›ดใซใ‚ˆใ‚Šใ€็•ณใฟ่พผใฟใƒ‹ใƒฅใƒผใƒฉใƒซ ใƒใƒƒใƒˆใƒฏใƒผใ‚ฏ (CNN) ใฎๆœ›ใพใ—ใ„็‰นๆ€งใŒๅฐŽๅ…ฅใ•ใ‚Œใพใ™ใ€‚ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผใฎๅˆฉ็‚น (ๅ‹•็š„ใชๆณจๆ„ๅŠ›ใ€ ใ‚ฐใƒญใƒผใƒใƒซใชใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใจใ‚ˆใ‚Š่‰ฏใ„ไธ€่ˆฌๅŒ–)ใ€‚็งใŸใกใฏๅบƒ็ฏ„ใชๅฎŸ้จ“ใ‚’ๅฎŸๆ–ฝใ™ใ‚‹ใ“ใจใง CvT ใ‚’ๆคœ่จผใ—ใ€ใ“ใฎใ‚ขใƒ—ใƒญใƒผใƒใŒ้”ๆˆใงใใ‚‹ใ“ใจใ‚’็คบใ—ใฆใ„ใพใ™ใ€‚ ImageNet-1k ไธŠใฎไป–ใฎใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผใ‚„ ResNet ใ‚ˆใ‚Šใ‚‚ใ€ใƒ‘ใƒฉใƒกใƒผใ‚ฟใŒๅฐ‘ใชใใ€FLOP ใŒไฝŽใ„ใ€ๆœ€ๅ…ˆ็ซฏใฎใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใ‚’ๅฎŸ็พใ—ใพใ™ใ€‚ๅŠ ใˆใฆใ€ ใ‚ˆใ‚Šๅคงใใชใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆ (ไพ‹: ImageNet-22k) ใงไบ‹ๅ‰ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ—ใ€ไธ‹ๆตใฎใ‚ฟใ‚นใ‚ฏใซๅˆใ‚ใ›ใฆๅพฎ่ชฟๆ•ดใ™ใ‚‹ใจใ€ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใฎๅ‘ไธŠใŒ็ถญๆŒใ•ใ‚Œใพใ™ใ€‚ไบ‹ๅ‰ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐๆธˆใฟ ImageNet-22kใ€ๅฝ“็คพใฎ CvT-W24 ใฏใ€ImageNet-1k val set ใง 87.7\% ใจใ„ใ†ใƒˆใƒƒใƒ— 1 ใฎ็ฒพๅบฆใ‚’็ฒๅพ—ใ—ใฆใ„ใพใ™ใ€‚ๆœ€ๅพŒใซใ€็งใŸใกใฎ็ตๆžœใฏใ€ไฝ็ฝฎใ‚จใƒณใ‚ณใƒผใƒ‡ใ‚ฃใƒณใ‚ฐใŒใ€ ๆ—ขๅญ˜ใฎใƒ“ใ‚ธใƒงใƒณ ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผใฎ้‡่ฆใชใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใงใ‚ใ‚‹ใ“ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒขใƒ‡ใƒซใงใฏๅฎ‰ๅ…จใซๅ‰Š้™คใงใใ‚‹ใŸใ‚ใ€้ซ˜่งฃๅƒๅบฆใฎใƒ“ใ‚ธใƒงใƒณ ใ‚ฟใ‚นใ‚ฏใฎ่จญ่จˆใŒ็ฐก็ด ๅŒ–ใ•ใ‚Œใพใ™ใ€‚* ใ“ใฎใƒขใƒ‡ใƒซใฏ [anugunj](https://huggingface.co/anugunj) ใซใ‚ˆใฃใฆๆไพ›ใ•ใ‚Œใพใ—ใŸใ€‚ๅ…ƒใฎใ‚ณใƒผใƒ‰ใฏ [ใ“ใ“](https://github.com/microsoft/CvT) ใซใ‚ใ‚Šใพใ™ใ€‚ ## Usage tips - CvT ใƒขใƒ‡ใƒซใฏ้€šๅธธใฎ Vision Transformer ใงใ™ใŒใ€็•ณใฟ่พผใฟใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚Œใฆใ„ใพใ™ใ€‚ ImageNet-1K ใŠใ‚ˆใณ CIFAR-100 ใงๅพฎ่ชฟๆ•ดใ™ใ‚‹ใจใ€[ใ‚ชใƒชใ‚ธใƒŠใƒซ ใƒขใƒ‡ใƒซ (ViT)](vit) ใ‚ˆใ‚Šใ‚‚ๅ„ชใ‚ŒใŸใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใ‚’็™บๆฎใ—ใพใ™ใ€‚ - ใ‚ซใ‚นใ‚ฟใƒ  ใƒ‡ใƒผใ‚ฟใฎๅพฎ่ชฟๆ•ดใ ใ‘ใงใชใๆŽจ่ซ–ใซ้–ขใ™ใ‚‹ใƒ‡ใƒข ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใ‚‚ [ใ“ใ“](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) ใง็ขบ่ชใงใใพใ™ 
([`ViTFeatureExtractor`] ใ‚’ [`AutoImageProcessor`] ใซใ€[`ViTForImageClassification`] ใ‚’ [`CvtForImageClassification`] ใซ็ฝฎใๆ›ใˆใ‚‹ใ ใ‘ใงๆธˆใฟใพใ™)ใ€‚ - ๅˆฉ็”จๅฏ่ƒฝใชใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใฏใ€(1) [ImageNet-22k](http://www.image-net.org/) (1,400 ไธ‡ใฎ็”ปๅƒใจ 22,000 ใฎใ‚ฏใƒฉใ‚นใฎใ‚ณใƒฌใ‚ฏใ‚ทใƒงใƒณ) ใงใฎใฟไบ‹ๅ‰ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚ŒใŸใ‚‚ใฎใ€(2) ImageNet-22k ใงใ•ใ‚‰ใซๅพฎ่ชฟๆ•ดใ•ใ‚ŒใŸใ‚‚ใฎใ€ใพใŸใฏ (3) [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (ILSVRC 2012 ใจใ‚‚ๅ‘ผใฐใ‚Œใ‚‹ใ€130 ไธ‡ใฎ็”ปๅƒใจ 1,000 ใ‚ฏใƒฉใ‚นใฎใ‚ณใƒฌใ‚ฏใ‚ทใƒงใƒณ) ใงใ‚‚ๅพฎ่ชฟๆ•ดใ•ใ‚ŒใŸใ‚‚ใฎใงใ™ใ€‚ ## Resources CvT ใ‚’ๅง‹ใ‚ใ‚‹ใฎใซๅฝน็ซ‹ใคๅ…ฌๅผ Hugging Face ใŠใ‚ˆใณใ‚ณใƒŸใƒฅใƒ‹ใƒ†ใ‚ฃ (๐ŸŒŽ ใง็คบใ•ใ‚Œใ‚‹) ใƒชใ‚ฝใƒผใ‚นใฎใƒชใ‚นใƒˆใ€‚ <PipelineTag pipeline="image-classification"/> - [`CvtForImageClassification`] ใฏใ€ใ“ใฎ [ใ‚ตใƒณใƒ—ใƒซ ใ‚นใ‚ฏใƒชใƒ—ใƒˆ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) ใŠใ‚ˆใณ [ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) ใซใ‚ˆใฃใฆใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ - ๅ‚็…ง: [็”ปๅƒๅˆ†้กžใ‚ฟใ‚นใ‚ฏ ใ‚ฌใ‚คใƒ‰](../tasks/image_classification) ใ“ใ“ใซๅซใ‚ใ‚‹ใƒชใ‚ฝใƒผใ‚นใฎ้€ไฟกใซ่ˆˆๅ‘ณใŒใ‚ใ‚‹ๅ ดๅˆใฏใ€ใŠๆฐ—่ปฝใซใƒ—ใƒซ ใƒชใ‚ฏใ‚จใ‚นใƒˆใ‚’้–‹ใ„ใฆใใ ใ•ใ„ใ€‚ๅฏฉๆŸปใ•ใ›ใฆใ„ใŸใ ใใพใ™ใ€‚ใƒชใ‚ฝใƒผใ‚นใฏใ€ๆ—ขๅญ˜ใฎใƒชใ‚ฝใƒผใ‚นใ‚’่ค‡่ฃฝใ™ใ‚‹ใฎใงใฏใชใใ€ไฝ•ใ‹ๆ–ฐใ—ใ„ใ‚‚ใฎใ‚’็คบใ™ใ“ใจใŒ็†ๆƒณ็š„ใงใ™ใ€‚ ## CvtConfig [[autodoc]] CvtConfig <frameworkcontent> <pt> ## CvtModel [[autodoc]] CvtModel - forward ## CvtForImageClassification [[autodoc]] CvtForImageClassification - forward </pt> <tf> ## TFCvtModel [[autodoc]] TFCvtModel - call ## TFCvtForImageClassification [[autodoc]] TFCvtForImageClassification - call </tf> </frameworkcontent>
transformers/docs/source/ja/model_doc/cvt.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/cvt.md", "repo_id": "transformers", "token_count": 2380 }
425
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Padding and truncation ใƒใƒƒใƒๅ…ฅๅŠ›ใฏใ—ใฐใ—ใฐ็•ฐใชใ‚‹้•ทใ•ใงใ‚ใ‚Šใ€ๅ›บๅฎšใ‚ตใ‚คใ‚บใฎใƒ†ใƒณใ‚ฝใƒซใซๅค‰ๆ›ใงใใชใ„ใŸใ‚ใ€ๅค‰ๅ‹•ใ™ใ‚‹้•ทใ•ใฎใƒใƒƒใƒใ‹ใ‚‰้•ทๆ–นๅฝขใฎใƒ†ใƒณใ‚ฝใƒซใ‚’ไฝœๆˆใ™ใ‚‹ใŸใ‚ใฎๆˆฆ็•ฅใจใ—ใฆใ€ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใจๅˆ‡ใ‚Š่ฉฐใ‚ใŒใ‚ใ‚Šใพใ™ใ€‚ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใฏใ€็Ÿญใ„ใ‚ทใƒผใ‚ฑใƒณใ‚นใŒใƒใƒƒใƒๅ†…ใฎๆœ€้•ทใ‚ทใƒผใ‚ฑใƒณใ‚นใพใŸใฏใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ทใจๅŒใ˜้•ทใ•ใซใชใ‚‹ใ‚ˆใ†ใซใ€็‰นๅˆฅใช**ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใƒˆใƒผใ‚ฏใƒณ**ใ‚’่ฟฝๅŠ ใ—ใพใ™ใ€‚ๅˆ‡ใ‚Š่ฉฐใ‚ใฏใ€้•ทใ„ใ‚ทใƒผใ‚ฑใƒณใ‚นใ‚’ๅˆ‡ใ‚Š่ฉฐใ‚ใ‚‹ใ“ใจใง้€†ๆ–นๅ‘ใซๆฉŸ่ƒฝใ—ใพใ™ใ€‚ ใปใจใ‚“ใฉใฎๅ ดๅˆใ€ใƒใƒƒใƒใ‚’ๆœ€้•ทใ‚ทใƒผใ‚ฑใƒณใ‚นใฎ้•ทใ•ใซใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใ—ใ€ใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ทใซๅˆ‡ใ‚Š่ฉฐใ‚ใ‚‹ใ“ใจใงใ€ใ†ใพใๅ‹•ไฝœใ—ใพใ™ใ€‚ใŸใ ใ—ใ€APIใฏใใ‚ŒไปฅไธŠใฎๆˆฆ็•ฅใ‚‚ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใพใ™ใ€‚ๅฟ…่ฆใช3ใคใฎๅผ•ๆ•ฐใฏๆฌกใฎใจใŠใ‚Šใงใ™๏ผš`padding`ใ€`truncation`ใ€ใŠใ‚ˆใณ `max_length`ใ€‚ `padding`ๅผ•ๆ•ฐใฏใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’ๅˆถๅพกใ—ใพใ™ใ€‚ใƒ–ใƒผใƒซๅ€คใพใŸใฏๆ–‡ๅญ—ๅˆ—ใงใ‚ใ‚‹ใ“ใจใŒใงใใพใ™๏ผš - `True`ใพใŸใฏ`'longest'`๏ผšใƒใƒƒใƒๅ†…ใฎๆœ€้•ทใ‚ทใƒผใ‚ฑใƒณใ‚นใซใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’่ฟฝๅŠ ใ—ใพใ™๏ผˆใ‚ทใƒผใ‚ฑใƒณใ‚นใŒ1ใคใ—ใ‹ๆไพ›ใ•ใ‚Œใชใ„ๅ 
ดๅˆใ€ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใฏ้ฉ็”จใ•ใ‚Œใพใ›ใ‚“๏ผ‰ใ€‚ - `max_length'`๏ผš`max_length`ๅผ•ๆ•ฐใงๆŒ‡ๅฎšใ•ใ‚ŒใŸ้•ทใ•ใพใงใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใ‚’่ฟฝๅŠ ใ—ใพใ™ใ€‚ใพใŸใฏ`max_length`ใŒๆไพ›ใ•ใ‚Œใฆใ„ใชใ„ๅ ดๅˆใฏใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ท๏ผˆ`max_length=None`๏ผ‰ใ€‚ใ‚ทใƒผใ‚ฑใƒณใ‚นใŒ1ใคใ—ใ‹ๆไพ›ใ•ใ‚Œใฆใ„ใ‚‹ๅ ดๅˆใงใ‚‚ใ€ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใฏ้ฉ็”จใ•ใ‚Œใพใ™ใ€‚ - `False`ใพใŸใฏ`'do_not_pad'`๏ผšใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใฏ้ฉ็”จใ•ใ‚Œใพใ›ใ‚“ใ€‚ใ“ใ‚ŒใŒใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎๅ‹•ไฝœใงใ™ใ€‚ `truncation`ๅผ•ๆ•ฐใฏๅˆ‡ใ‚Š่ฉฐใ‚ใ‚’ๅˆถๅพกใ—ใพใ™ใ€‚ใƒ–ใƒผใƒซๅ€คใพใŸใฏๆ–‡ๅญ—ๅˆ—ใงใ‚ใ‚‹ใ“ใจใŒใงใใพใ™๏ผš - `True`ใพใŸใฏ`'longest_first'`๏ผšๆœ€ๅคง้•ทใ‚’`max_length`ๅผ•ๆ•ฐใงๆŒ‡ๅฎšใ™ใ‚‹ใ‹ใ€ใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ท๏ผˆ`max_length=None`๏ผ‰ใพใงๅˆ‡ใ‚Š่ฉฐใ‚ใพใ™ใ€‚ใ“ใ‚Œใฏใƒˆใƒผใ‚ฏใƒณใ”ใจใซๅˆ‡ใ‚Š่ฉฐใ‚ใ€้ฉๅˆ‡ใช้•ทใ•ใซ้”ใ™ใ‚‹ใพใงใƒšใ‚ขๅ†…ใฎๆœ€้•ทใ‚ทใƒผใ‚ฑใƒณใ‚นใ‹ใ‚‰ใƒˆใƒผใ‚ฏใƒณใ‚’ๅ‰Š้™คใ—ใพใ™ใ€‚ - `'only_second'`๏ผšๆœ€ๅคง้•ทใ‚’`max_length`ๅผ•ๆ•ฐใงๆŒ‡ๅฎšใ™ใ‚‹ใ‹ใ€ใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ท๏ผˆ`max_length=None`๏ผ‰ใพใงๅˆ‡ใ‚Š่ฉฐใ‚ใพใ™ใ€‚ใ“ใ‚Œใฏใƒšใ‚ขใฎ2็•ช็›ฎใฎๆ–‡ใ ใ‘ใ‚’ๅˆ‡ใ‚Š่ฉฐใ‚ใพใ™๏ผˆใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒšใ‚ขใพใŸใฏใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒใƒƒใƒใฎใƒšใ‚ขใŒๆไพ›ใ•ใ‚ŒใŸๅ ดๅˆ๏ผ‰ใ€‚ - `'only_first'`๏ผšๆœ€ๅคง้•ทใ‚’`max_length`ๅผ•ๆ•ฐใงๆŒ‡ๅฎšใ™ใ‚‹ใ‹ใ€ใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคง้•ท๏ผˆ`max_length=None`๏ผ‰ใพใงๅˆ‡ใ‚Š่ฉฐใ‚ใพใ™ใ€‚ใ“ใ‚Œใฏใƒšใ‚ขใฎๆœ€ๅˆใฎๆ–‡ใ ใ‘ใ‚’ๅˆ‡ใ‚Š่ฉฐใ‚ใพใ™๏ผˆใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒšใ‚ขใพใŸใฏใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒใƒƒใƒใฎใƒšใ‚ขใŒๆไพ›ใ•ใ‚ŒใŸๅ ดๅˆ๏ผ‰ใ€‚ - `False`ใพใŸใฏ`'do_not_truncate'`๏ผšๅˆ‡ใ‚Š่ฉฐใ‚ใฏ้ฉ็”จใ•ใ‚Œใพใ›ใ‚“ใ€‚ใ“ใ‚ŒใŒใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎๅ‹•ไฝœใงใ™ใ€‚ `max_length`ๅผ•ๆ•ฐใฏใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใจๅˆ‡ใ‚Š่ฉฐใ‚ใฎ้•ทใ•ใ‚’ๅˆถๅพกใ—ใพใ™ใ€‚ๆ•ดๆ•ฐใพใŸใฏ`None`ใงใ‚ใ‚Šใ€ใ“ใฎๅ ดๅˆใ€ใƒขใƒ‡ใƒซใŒๅ—ใ‘ๅ…ฅใ‚Œใ‚‹ๆœ€ๅคงๅ…ฅๅŠ›้•ทใซใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใง่จญๅฎšใ•ใ‚Œใพใ™ใ€‚ใƒขใƒ‡ใƒซใซ็‰นๅฎšใฎๆœ€ๅคงๅ…ฅๅŠ›้•ทใŒใชใ„ๅ 
ดๅˆใ€`max_length`ใธใฎๅˆ‡ใ‚Š่ฉฐใ‚ใพใŸใฏใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใฏ็„กๅŠนใซใชใ‚Šใพใ™ใ€‚ ไปฅไธ‹ใฎ่กจใฏใ€ใƒ‘ใƒ‡ใ‚ฃใƒณใ‚ฐใจๅˆ‡ใ‚Š่ฉฐใ‚ใ‚’่จญๅฎšใ™ใ‚‹ๆŽจๅฅจๆ–นๆณ•ใ‚’่ฆ็ด„ใ—ใฆใ„ใพใ™ใ€‚ไปฅไธ‹ใฎไพ‹ใฎใ„ใšใ‚Œใ‹ใงๅ…ฅๅŠ›ใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒšใ‚ขใ‚’ไฝฟ็”จใ™ใ‚‹ๅ ดๅˆใ€`truncation=True`ใ‚’`['only_first', 'only_second', 'longest_first']`ใง้ธๆŠžใ—ใŸ`STRATEGY`ใซ็ฝฎใๆ›ใˆใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ใคใพใ‚Šใ€`truncation='only_second'`ใพใŸใฏ`truncation='longest_first'`ใ‚’ไฝฟ็”จใ—ใฆใ€ใƒšใ‚ขๅ†…ใฎไธกๆ–นใฎใ‚ทใƒผใ‚ฑใƒณใ‚นใ‚’ๅ‰่ฟฐใฎใ‚ˆใ†ใซๅˆ‡ใ‚Š่ฉฐใ‚ใ‚‹ๆ–นๆณ•ใ‚’ๅˆถๅพกใงใใพใ™ใ€‚ | Truncation | Padding | Instruction | |--------------------------------------|-----------------------------------|---------------------------------------------------------------------------------------------| | no truncation | no padding | `tokenizer(batch_sentences)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True)` or | | | | `tokenizer(batch_sentences, padding='longest')` | | | padding to max model input length | `tokenizer(batch_sentences, padding='max_length')` | | | padding to specific length | `tokenizer(batch_sentences, padding='max_length', max_length=42)` | | | padding to a multiple of a value | `tokenizer(batch_sentences, padding=True, pad_to_multiple_of=8)` | | truncation to max model input length | no padding | `tokenizer(batch_sentences, truncation=True)` or | | | | `tokenizer(batch_sentences, truncation=STRATEGY)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True, truncation=True)` or | | | | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY)` | | | padding to max model input length | `tokenizer(batch_sentences, padding='max_length', truncation=True)` or | | | | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)` | | | padding to specific length | Not possible | | truncation to specific length | no padding | `tokenizer(batch_sentences, truncation=True, 
max_length=42)` or | | | | `tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)` | | | padding to max sequence in batch | `tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)` or | | | | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)` | | | padding to max model input length | Not possible | | | padding to specific length | `tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)` or | | | | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY, max_length=42)` |
transformers/docs/source/ja/pad_truncation.md/0
{ "file_path": "transformers/docs/source/ja/pad_truncation.md", "repo_id": "transformers", "token_count": 4228 }
426
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Perplexity of fixed-length models [[open-in-colab]] ใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃ๏ผˆPPL๏ผ‰ใฏ่จ€่ชžใƒขใƒ‡ใƒซใฎ่ฉ•ไพกใซๆœ€ใ‚‚ไธ€่ˆฌ็š„ใชๆŒ‡ๆจ™ใฎ1ใคใงใ™ใ€‚ๆทฑๅ…ฅใ‚Šใ™ใ‚‹ๅ‰ใซใ€ใ“ใฎๆŒ‡ๆจ™ใฏ็‰นใซๅคๅ…ธ็š„ใช่จ€่ชžใƒขใƒ‡ใƒซ๏ผˆๆ™‚ใซใฏใ‚ชใƒผใƒˆใƒฌใ‚ฐใƒฌใƒƒใ‚ทใƒ–ใพใŸใฏๅ› ๆžœ่จ€่ชžใƒขใƒ‡ใƒซใจใ‚‚ๅ‘ผใฐใ‚Œใ‚‹๏ผ‰ใซ้ฉ็”จใ•ใ‚Œใ€BERTใชใฉใฎใƒžใ‚นใ‚ฏใ•ใ‚ŒใŸ่จ€่ชžใƒขใƒ‡ใƒซใซใฏ้ฉใ—ใฆใ„ใชใ„ใ“ใจใซๆณจๆ„ใ™ในใใงใ™๏ผˆใƒขใƒ‡ใƒซใฎๆฆ‚่ฆใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„[ใƒขใƒ‡ใƒซใฎๆฆ‚่ฆ](model_summary)๏ผ‰ใ€‚ ใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใฏใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นใฎๆŒ‡ๆ•ฐๅนณๅ‡่ฒ ใฎๅฏพๆ•ฐๅฐคๅบฆใจใ—ใฆๅฎš็พฉใ•ใ‚Œใพใ™ใ€‚ใƒˆใƒผใ‚ฏใƒณๅŒ–ใ•ใ‚ŒใŸใ‚ทใƒผใ‚ฑใƒณใ‚น \\(X = (x_0, x_1, \dots, x_t)\\) ใŒใ‚ใ‚‹ๅ ดๅˆใ€\\(X\\) ใฎใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใฏๆฌกใฎใ‚ˆใ†ใซ่กจใ•ใ‚Œใพใ™ใ€‚ $$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$ ใ“ใ“ใงใ€\\(\log p_\theta (x_i|x_{<i})\\) ใฏใƒขใƒ‡ใƒซใซใ‚ˆใ‚‹ๅ‰ใฎใƒˆใƒผใ‚ฏใƒณ \\(x_{<i}\\) 
ใซๅฏพใ™ใ‚‹็ฌฌiใƒˆใƒผใ‚ฏใƒณใฎๅฏพๆ•ฐๅฐคๅบฆใงใ™ใ€‚็›ดๆ„Ÿ็š„ใซใฏใ€ใ“ใ‚Œใฏใƒขใƒ‡ใƒซใŒใ‚ณใƒผใƒ‘ใ‚นๅ†…ใฎๆŒ‡ๅฎšใ•ใ‚ŒใŸใƒˆใƒผใ‚ฏใƒณใฎ้›†ๅˆใซๅฏพใ—ใฆไธ€ๆง˜ใซไบˆๆธฌใ™ใ‚‹่ƒฝๅŠ›ใฎ่ฉ•ไพกใจ่€ƒใˆใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚้‡่ฆใชใฎใฏใ€ใ“ใ‚Œใซใ‚ˆใฃใฆใƒˆใƒผใ‚ฏใƒณๅŒ–ๆ‰‹ๆณ•ใŒใƒขใƒ‡ใƒซใฎใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใซ็›ดๆŽฅๅฝฑ้Ÿฟใ‚’ไธŽใˆใ‚‹ใŸใ‚ใ€็•ฐใชใ‚‹ใƒขใƒ‡ใƒซใ‚’ๆฏ”่ผƒใ™ใ‚‹้š›ใซใฏๅธธใซ่€ƒๆ…ฎใ™ในใใงใ‚ใ‚‹ใจใ„ใ†ใ“ใจใงใ™ใ€‚ ใ“ใ‚ŒใฏใพใŸใ€ใƒ‡ใƒผใ‚ฟใจใƒขใƒ‡ใƒซใฎไบˆๆธฌใจใฎ้–“ใฎไบคๅทฎใ‚จใƒณใƒˆใƒญใƒ”ใƒผใฎๆŒ‡ๆ•ฐๅŒ–ใจๅŒ็ญ‰ใงใ™ใ€‚ใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใŠใ‚ˆใณใƒ“ใƒƒใƒˆใƒปใƒ‘ใƒผใƒปใ‚ญใƒฃใƒฉใ‚ฏใ‚ฟใƒผ๏ผˆBPC๏ผ‰ใจใƒ‡ใƒผใ‚ฟๅœง็ธฎใจใฎ้–ขไฟ‚ใซใคใ„ใฆใฎ่ฉณ็ดฐใชๆƒ…ๅ ฑใซใคใ„ใฆใฏใ€ใ“ใฎ[็ด ๆ™ดใ‚‰ใ—ใ„ The Gradient ใฎใƒ–ใƒญใ‚ฐ่จ˜ไบ‹](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/)ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ ## Calculating PPL with fixed-length models ใƒขใƒ‡ใƒซใฎใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚ตใ‚คใ‚บใซๅˆถ็ด„ใŒใชใ„ๅ ดๅˆใ€ใƒขใƒ‡ใƒซใฎใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใ‚’่ฉ•ไพกใ™ใ‚‹ใŸใ‚ใซใฏใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นใ‚’่‡ชๅทฑๅ›žๅธฐ็š„ใซๅ› ๅญๅˆ†่งฃใ—ใ€ๅ„ใ‚นใƒ†ใƒƒใƒ—ใงๅ‰ใฎใ‚ตใƒ–ใ‚ทใƒผใ‚ฑใƒณใ‚นใซๆกไปถใ‚’ไป˜ใ‘ใ‚‹ใ“ใจใง่จˆ็ฎ—ใ—ใพใ™ใ€‚ไปฅไธ‹ใซ็คบใ™ใ‚ˆใ†ใซใ€‚ <img width="600" alt="ๅฎŒๅ…จใชใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆ้•ทใฎใ‚ทใƒผใ‚ฑใƒณใ‚นใฎๅˆ†่งฃ" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/> ใ—ใ‹ใ—ใ€้€šๅธธใ€่ฟ‘ไผผใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ๅ ดๅˆใ€ใƒขใƒ‡ใƒซใŒๅ‡ฆ็†ใงใใ‚‹ใƒˆใƒผใ‚ฏใƒณๆ•ฐใซๅˆถ็ด„ใŒใ‚ใ‚Šใพใ™ใ€‚ไพ‹ใˆใฐใ€ๆœ€ๅคงใฎ[GPT-2](model_doc/gpt2)ใฎใƒใƒผใ‚ธใƒงใƒณใฏ1024ใƒˆใƒผใ‚ฏใƒณใฎๅ›บๅฎš้•ทใ‚’ๆŒใฃใฆใ„ใ‚‹ใŸใ‚ใ€1024ใ‚ˆใ‚Šใ‚‚ๅคงใใ„ \\(t\\) ใซๅฏพใ—ใฆ \\(p_\theta(x_t|x_{<t})\\) ใ‚’็›ดๆŽฅ่จˆ็ฎ—ใ™ใ‚‹ใ“ใจใฏใงใใพใ›ใ‚“ใ€‚ ไปฃใ‚ใ‚Šใซใ€้€šๅธธใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นใฏใƒขใƒ‡ใƒซใฎๆœ€ๅคงๅ…ฅๅŠ›ใ‚ตใ‚คใ‚บใซ็ญ‰ใ—ใ„ใ‚ตใƒ–ใ‚ทใƒผใ‚ฑใƒณใ‚นใซๅˆ†ๅ‰ฒใ•ใ‚Œใพใ™ใ€‚ใƒขใƒ‡ใƒซใฎๆœ€ๅคงๅ…ฅๅŠ›ใ‚ตใ‚คใ‚บใŒ \\(k\\) ใฎๅ ดๅˆใ€ใƒˆใƒผใ‚ฏใƒณ 
\\(x_t\\) ใฎๅฐคๅบฆใ‚’่ฟ‘ไผผใ™ใ‚‹ใซใฏใ€ๅฎŒๅ…จใชใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใงใฏใชใใ€ใใ‚Œใ‚’ๅ…ˆ่กŒใ™ใ‚‹ \\(k-1\\) ใƒˆใƒผใ‚ฏใƒณใซใฎใฟๆกไปถใ‚’ไป˜ใ‘ใ‚‹ใ“ใจใŒใ‚ใ‚Šใพใ™ใ€‚ใ‚ทใƒผใ‚ฑใƒณใ‚นใฎใƒขใƒ‡ใƒซใฎใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใ‚’่ฉ•ไพกใ™ใ‚‹้š›ใ€่ช˜ๆƒ‘็š„ใงใ™ใŒ้žๅŠน็އใชๆ–นๆณ•ใฏใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นใ‚’ๅˆ†ๅ‰ฒใ—ใ€ๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎๅˆ†่งฃๅฏพๆ•ฐๅฐคๅบฆใ‚’็‹ฌ็ซ‹ใซๅˆ็ฎ—ใ™ใ‚‹ใ“ใจใงใ™ใ€‚ <img width="600" alt="ๅˆฉ็”จๅฏ่ƒฝใชๅฎŒๅ…จใชใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚’ๆดป็”จใ—ใชใ„้žๆœ€้ฉใชPPL" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/> ใ“ใ‚Œใฏๅ„ใ‚ปใ‚ฐใƒกใƒณใƒˆใฎใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใŒ1ๅ›žใฎใƒ•ใ‚ฉใƒฏใƒผใƒ‰ใƒ‘ใ‚นใง่จˆ็ฎ—ใงใใ‚‹ใŸใ‚ใ€่จˆ็ฎ—ใŒ่ฟ…้€Ÿใงใ™ใŒใ€้€šๅธธใ€ใƒขใƒ‡ใƒซใฏใปใจใ‚“ใฉใฎไบˆๆธฌใ‚นใƒ†ใƒƒใƒ—ใงใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใŒๅฐ‘ใชใ„ใŸใ‚ใ€ๅฎŒๅ…จใซๅ› ๅญๅˆ†่งฃใ•ใ‚ŒใŸใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใฎๆ‚ชใ„่ฟ‘ไผผใจใชใ‚Šใ€้€šๅธธใ€ใ‚ˆใ‚Š้ซ˜ใ„๏ผˆๆ‚ชใ„๏ผ‰PPLใ‚’่ฟ”ใ—ใพใ™ใ€‚ ไปฃใ‚ใ‚Šใซใ€ๅ›บๅฎš้•ทใƒขใƒ‡ใƒซใฎPPLใฏใ‚นใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆๆˆฆ็•ฅใ‚’็”จใ„ใฆ่ฉ•ไพกใ™ใ‚‹ในใใงใ™ใ€‚ใ“ใ‚Œใซใฏใ€ใƒขใƒ‡ใƒซใŒๅ„ไบˆๆธฌใ‚นใƒ†ใƒƒใƒ—ใงใ‚ˆใ‚Šๅคšใใฎใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚’ๆŒใคใ‚ˆใ†ใซใ€ใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆใ‚’็นฐใ‚Š่ฟ”ใ—ใ‚นใƒฉใ‚คใƒ‰ใ•ใ›ใ‚‹ใจใ„ใ†ๆ–นๆณ•ใŒๅซใพใ‚Œใพใ™ใ€‚ <img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/> ใ“ใ‚Œใฏใ‚ทใƒผใ‚ฑใƒณใ‚นใฎ็ขบ็އใฎใ‚ˆใ‚Šๆญฃ็ขบใชๅˆ†่งฃใซ่ฟ‘ใ„ใ‚‚ใฎใงใ‚ใ‚Šใ€้€šๅธธใฏใ‚ˆใ‚Šๆœ‰ๅˆฉใชใ‚นใ‚ณใ‚ขใ‚’็”Ÿๆˆใ—ใพใ™ใ€‚ๆฌ 
็‚นใฏใ€ใ‚ณใƒผใƒ‘ใ‚นๅ†…ใฎๅ„ใƒˆใƒผใ‚ฏใƒณใซๅฏพใ—ใฆๅˆฅๅ€‹ใฎๅ‰ๆ–นใƒ‘ใ‚นใŒๅฟ…่ฆใงใ™ใ€‚ๅฎŸ็”จ็š„ใชๅฆฅๅ”ๆกˆใฏใ€1ใƒˆใƒผใ‚ฏใƒณใšใคใ‚นใƒฉใ‚คใƒ‰ใ™ใ‚‹ไปฃใ‚ใ‚Šใซใ€ใ‚ˆใ‚Šๅคงใใชใ‚นใƒˆใƒฉใ‚คใƒ‰ใงใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚’็งปๅ‹•ใ™ใ‚‹ใ‚นใƒˆใƒฉใ‚คใƒ‰ๅž‹ใฎใ‚นใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใงใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€่จˆ็ฎ—ใŒใฏใ‚‹ใ‹ใซ้ซ˜้€Ÿใซ้€ฒ่กŒใงใใ‚‹ไธ€ๆ–นใงใ€ใƒขใƒ‡ใƒซใซใฏๅ„ใ‚นใƒ†ใƒƒใƒ—ใงไบˆๆธฌใ‚’่กŒใ†ใŸใ‚ใฎๅคงใใชใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใŒๆไพ›ใ•ใ‚Œใพใ™ใ€‚ ## Example: Calculating perplexity with GPT-2 in ๐Ÿค— Transformers GPT-2ใ‚’ไฝฟ็”จใ—ใฆใ“ใฎใƒ—ใƒญใ‚ปใ‚นใ‚’ใƒ‡ใƒขใƒณใ‚นใƒˆใƒฌใƒผใ‚ทใƒงใƒณใ—ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ ```python from transformers import GPT2LMHeadModel, GPT2TokenizerFast device = "cuda" model_id = "openai-community/gpt2-large" model = GPT2LMHeadModel.from_pretrained(model_id).to(device) tokenizer = GPT2TokenizerFast.from_pretrained(model_id) ``` WikiText-2ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’่ชญใฟ่พผใฟใ€็•ฐใชใ‚‹ใ‚นใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆๆˆฆ็•ฅใ‚’ไฝฟ็”จใ—ใฆใƒ‘ใƒผใƒ—ใƒฌใ‚ญใ‚ทใƒ†ใ‚ฃใ‚’่ฉ•ไพกใ—ใพใ™ใ€‚ใ“ใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฏๅฐ่ฆๆจกใงใ€ใ‚ปใƒƒใƒˆๅ…จไฝ“ใซๅฏพใ—ใฆๅ˜ไธ€ใฎใƒ•ใ‚ฉใƒฏใƒผใƒ‰ใƒ‘ใ‚นใ‚’ๅฎŸ่กŒใ™ใ‚‹ใ ใ‘ใชใฎใงใ€ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆๅ…จไฝ“ใ‚’ใƒกใƒขใƒชใซ่ชญใฟ่พผใ‚“ใงใ‚จใƒณใ‚ณใƒผใƒ‰ใ™ใ‚‹ใ ใ‘ใงๅๅˆ†ใงใ™ใ€‚ ```python from datasets import load_dataset test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt") ``` ๐Ÿค— Transformersใ‚’ไฝฟ็”จใ™ใ‚‹ใจใ€ๅ˜็ด”ใซ`input_ids`ใ‚’ใƒขใƒ‡ใƒซใฎ`labels`ใจใ—ใฆๆธกใ™ใ“ใจใงใ€ๅ„ใƒˆใƒผใ‚ฏใƒณใฎๅนณๅ‡่ฒ 
ใฎๅฏพๆ•ฐๅฐคๅบฆใŒๆๅคฑใจใ—ใฆ่ฟ”ใ•ใ‚Œใพใ™ใ€‚ใ—ใ‹ใ—ใ€ใ‚นใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆใฎใ‚ขใƒ—ใƒญใƒผใƒใงใฏใ€ๅ„ใ‚คใƒ†ใƒฌใƒผใ‚ทใƒงใƒณใงใƒขใƒ‡ใƒซใซๆธกใ™ใƒˆใƒผใ‚ฏใƒณใซใ‚ชใƒผใƒใƒผใƒฉใƒƒใƒ—ใŒใ‚ใ‚Šใพใ™ใ€‚็งใŸใกใฏใ€ใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใจใ—ใฆๆ‰ฑใฃใฆใ„ใ‚‹ใƒˆใƒผใ‚ฏใƒณใฎๅฏพๆ•ฐๅฐคๅบฆใ‚’ๆๅคฑใซๅซใ‚ใŸใใ‚ใ‚Šใพใ›ใ‚“ใ€‚ใใฎใŸใ‚ใ€ใ“ใ‚Œใ‚‰ใฎๅฏพ่ฑกใ‚’ `-100` ใซ่จญๅฎšใ—ใฆ็„ก่ฆ–ใ•ใ‚Œใ‚‹ใ‚ˆใ†ใซใ—ใพใ™ใ€‚ไปฅไธ‹ใฏใ€ใ‚นใƒˆใƒฉใ‚คใƒ‰ใ‚’ `512` ใจใ—ใŸๅ ดๅˆใฎไพ‹ใงใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€ใƒขใƒ‡ใƒซใฏไปปๆ„ใฎใƒˆใƒผใ‚ฏใƒณใฎๆกไปถไป˜ใ‘ใฎๅฐคๅบฆใ‚’่จˆ็ฎ—ใ™ใ‚‹้š›ใซใ€ๅฐ‘ใชใใจใ‚‚ใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใจใ—ใฆ 512 ใƒˆใƒผใ‚ฏใƒณใ‚’ๆŒใคใ“ใจใซใชใ‚Šใพใ™๏ผˆ512 ๅ€‹ใฎๅ‰ใฎใƒˆใƒผใ‚ฏใƒณใŒๅˆฉ็”จๅฏ่ƒฝใงใ‚ใ‚‹ๅ ดๅˆ๏ผ‰ใ€‚ ```python import torch from tqdm import tqdm max_length = model.config.n_positions stride = 512 seq_len = encodings.input_ids.size(1) nlls = [] prev_end_loc = 0 for begin_loc in tqdm(range(0, seq_len, stride)): end_loc = min(begin_loc + max_length, seq_len) trg_len = end_loc - prev_end_loc # may be different from stride on last loop input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) # loss is calculated using CrossEntropyLoss which averages over valid labels # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels # to the left by 1. 
neg_log_likelihood = outputs.loss nlls.append(neg_log_likelihood) prev_end_loc = end_loc if end_loc == seq_len: break ppl = torch.exp(torch.stack(nlls).mean()) ``` ใ‚นใƒˆใƒฉใ‚คใƒ‰้•ทใŒๆœ€ๅคงๅ…ฅๅŠ›้•ทใจๅŒใ˜ๅ ดๅˆใ€ไธŠ่ฟฐใฎๆœ€้ฉใงใชใ„ใ‚นใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆๆˆฆ็•ฅใจๅŒ็ญ‰ใงใ™ใ€‚ใ‚นใƒˆใƒฉใ‚คใƒ‰ใŒๅฐใ•ใ„ใปใฉใ€ใƒขใƒ‡ใƒซใฏๅ„ไบˆๆธฌใ‚’่กŒใ†้š›ใซใ‚ˆใ‚Šๅคšใใฎใ‚ณใƒณใƒ†ใ‚ญใ‚นใƒˆใ‚’ๆŒใคใŸใ‚ใ€้€šๅธธใ€ๅ ฑๅ‘Šใ•ใ‚Œใ‚‹ๅ›ฐ้›ฃๅบฆ๏ผˆperplexity๏ผ‰ใŒๅ‘ไธŠใ—ใพใ™ใ€‚ ไธŠ่จ˜ใฎใ‚ณใƒผใƒ‰ใ‚’ `stride = 1024` ใงๅฎŸ่กŒใ™ใ‚‹ใจใ€ใ‚ชใƒผใƒใƒผใƒฉใƒƒใƒ—ใŒใชใ„็Šถๆ…‹ใงใ€็ตๆžœใฎๅ›ฐ้›ฃๅบฆ๏ผˆperplexity๏ผ‰ใฏ `19.44` ใซใชใ‚Šใพใ™ใ€‚ใ“ใ‚Œใฏ GPT-2 ใฎ่ซ–ๆ–‡ใซๅ ฑๅ‘Šใ•ใ‚ŒใŸ `19.93` ใจใปใผๅŒ็ญ‰ใงใ™ใ€‚ไธ€ๆ–นใ€`stride = 512` ใ‚’ไฝฟ็”จใ—ใ€ใ“ใฎใ‚ˆใ†ใซใ‚นใƒˆใƒฉใ‚คใƒ‡ใ‚ฃใƒณใ‚ฐใ‚ฆใ‚ฃใƒณใƒ‰ใ‚ฆๆˆฆ็•ฅใ‚’ๆŽก็”จใ™ใ‚‹ใจใ€ๅ›ฐ้›ฃๅบฆ๏ผˆperplexity๏ผ‰ใŒ `16.45` ใซๅ‘ไธŠใ—ใพใ™ใ€‚ใ“ใ‚Œใฏใ‚ˆใ‚Šๅฅฝๆ„็š„ใชใ‚นใ‚ณใ‚ขใ ใ‘ใงใชใใ€ใ‚ทใƒผใ‚ฑใƒณใ‚นใฎๅฐคๅบฆใฎ็œŸใฎ่‡ชๅทฑๅ›žๅธฐๅˆ†่งฃใซใ‚ˆใ‚Š่ฟ‘ใ„ๆ–นๆณ•ใง่จˆ็ฎ—ใ•ใ‚Œใฆใ„ใพใ™ใ€‚
transformers/docs/source/ja/perplexity.md/0
{ "file_path": "transformers/docs/source/ja/perplexity.md", "repo_id": "transformers", "token_count": 4045 }
427
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image-to-Image Task Guide [[open-in-colab]] Image-to-Image ใ‚ฟใ‚นใ‚ฏใฏใ€ใ‚ขใƒ—ใƒชใ‚ฑใƒผใ‚ทใƒงใƒณใŒ็”ปๅƒใ‚’ๅ—ไฟกใ—ใ€ๅˆฅใฎ็”ปๅƒใ‚’ๅ‡บๅŠ›ใ™ใ‚‹ใ‚ฟใ‚นใ‚ฏใงใ™ใ€‚ใ“ใ‚Œใซใฏใ€็”ปๅƒๅผทๅŒ– (่ถ…่งฃๅƒๅบฆใ€ไฝŽๅ…‰้‡ๅผทๅŒ–ใ€ใƒ‡ใ‚ฃใƒฌใ‚คใƒณใชใฉ)ใ€็”ปๅƒไฟฎๅพฉใชใฉใ‚’ๅซใ‚€ใ•ใพใ–ใพใชใ‚ตใƒ–ใ‚ฟใ‚นใ‚ฏใŒใ‚ใ‚Šใพใ™ใ€‚ ใ“ใฎใ‚ฌใ‚คใƒ‰ใงใฏใ€ๆฌกใฎๆ–นๆณ•ใ‚’่ชฌๆ˜Žใ—ใพใ™ใ€‚ - ่ถ…่งฃๅƒๅบฆใ‚ฟใ‚นใ‚ฏใซ็”ปๅƒ้–“ใฎใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ - ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝฟ็”จใ›ใšใซใ€ๅŒใ˜ใ‚ฟใ‚นใ‚ฏใซๅฏพใ—ใฆใ‚คใƒกใƒผใ‚ธ้–“ใƒขใƒ‡ใƒซใ‚’ๅฎŸ่กŒใ—ใพใ™ใ€‚ ใ“ใฎใ‚ฌใ‚คใƒ‰ใŒใƒชใƒชใƒผใ‚นใ•ใ‚ŒใŸๆ™‚็‚นใงใฏใ€`image-to-image`ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฏ่ถ…่งฃๅƒๅบฆใ‚ฟใ‚นใ‚ฏใฎใฟใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใ‚‹ใ“ใจใซๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ ๅฟ…่ฆใชใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ใ“ใจใ‹ใ‚‰ๅง‹ใ‚ใพใ—ใ‚‡ใ†ใ€‚ ```bash pip install transformers ``` [Swin2SR ใƒขใƒ‡ใƒซ](https://huggingface.co/caidas/swin2SR-lightweight-x2-64) ใ‚’ไฝฟ็”จใ—ใฆใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ๅˆๆœŸๅŒ–ใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ—ใŸใ€‚ๆฌกใซใ€ใ‚คใƒกใƒผใ‚ธใ‚’ไฝฟ็”จใ—ใฆใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ๅ‘ผใณๅ‡บใ™ใ“ใจใงใ€ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ๆŽจ่ซ–ใงใใพใ™ใ€‚็พๆ™‚็‚นใงใฏใ€[Swin2SR 
ใƒขใƒ‡ใƒซ](https://huggingface.co/models?sort=trending&search=swin2sr) ใฎใฟใŒใ“ใฎใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใงใ‚ตใƒใƒผใƒˆใ•ใ‚Œใฆใ„ใพใ™ใ€‚ ```python from transformers import pipeline device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') pipe = pipeline(task="image-to-image", model="caidas/swin2SR-lightweight-x2-64", device=device) ``` ใงใฏใ€็”ปๅƒใ‚’่ชญใฟ่พผใฟใพใ—ใ‚‡ใ†ใ€‚ ```python from PIL import Image import requests url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" image = Image.open(requests.get(url, stream=True).raw) print(image.size) ``` ```bash # (532, 432) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" alt="Photo of a cat"/> </div> ใ“ใ‚Œใงใ€ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝฟ็”จใ—ใฆๆŽจ่ซ–ใ‚’ๅฎŸ่กŒใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ—ใŸใ€‚็Œซใฎ็”ปๅƒใฎๆ‹กๅคงใƒใƒผใ‚ธใƒงใƒณใ‚’ๅ–ๅพ—ใ—ใพใ™ใ€‚ ```python upscaled = pipe(image) print(upscaled.size) ``` ```bash # (1072, 880) ``` ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝฟ็”จใ›ใšใซ่‡ชๅˆ†ใงๆŽจ่ซ–ใ‚’ๅฎŸ่กŒใ—ใŸใ„ๅ ดๅˆใฏใ€ใƒˆใƒฉใƒณใ‚นใƒ•ใ‚ฉใƒผใƒžใƒผใฎ `Swin2SRForImageSuperResolution` ใ‚ฏใƒฉใ‚นใจ `Swin2SRImageProcessor` ใ‚ฏใƒฉใ‚นใ‚’ไฝฟ็”จใงใใพใ™ใ€‚ใ“ใ‚ŒใซใฏๅŒใ˜ใƒขใƒ‡ใƒซใฎใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ใƒขใƒ‡ใƒซใจใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ๅˆๆœŸๅŒ–ใ—ใพใ—ใ‚‡ใ†ใ€‚ ```python from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-lightweight-x2-64").to(device) processor = Swin2SRImageProcessor("caidas/swin2SR-lightweight-x2-64") ``` `pipeline`ใ€ใฏใ€่‡ชๅˆ†ใง่กŒใ†ๅฟ…่ฆใŒใ‚ใ‚‹ๅ‰ๅ‡ฆ็†ใจๅพŒๅ‡ฆ็†ใฎใ‚นใƒ†ใƒƒใƒ—ใ‚’ๆŠฝ่ฑกๅŒ–ใ™ใ‚‹ใฎใงใ€็”ปๅƒใ‚’ๅ‰ๅ‡ฆ็†ใ—ใพใ—ใ‚‡ใ†ใ€‚็”ปๅƒใ‚’ใƒ—ใƒญใ‚ปใƒƒใ‚ตใซๆธกใ—ใฆใ‹ใ‚‰ใ€ใƒ”ใ‚ฏใ‚ปใƒซๅ€คใ‚’ GPU ใซ็งปๅ‹•ใ—ใพใ™ใ€‚ ```python pixel_values = processor(image, 
return_tensors="pt").pixel_values print(pixel_values.shape) pixel_values = pixel_values.to(device) ``` ใ“ใ‚Œใงใ€ใƒ”ใ‚ฏใ‚ปใƒซๅ€คใ‚’ใƒขใƒ‡ใƒซใซๆธกใ™ใ“ใจใง็”ปๅƒใ‚’ๆŽจๆธฌใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ—ใŸใ€‚ ```python import torch with torch.no_grad(): outputs = model(pixel_values) ``` ๅ‡บๅŠ›ใฏใ€ไปฅไธ‹ใฎใ‚ˆใ†ใช `ImageSuperResolutionOutput` ใ‚ฟใ‚คใƒ—ใฎใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใงใ™ ๐Ÿ‘‡ ``` (loss=None, reconstruction=tensor([[[[0.8270, 0.8269, 0.8275, ..., 0.7463, 0.7446, 0.7453], [0.8287, 0.8278, 0.8283, ..., 0.7451, 0.7448, 0.7457], [0.8280, 0.8273, 0.8269, ..., 0.7447, 0.7446, 0.7452], ..., [0.5923, 0.5933, 0.5924, ..., 0.0697, 0.0695, 0.0706], [0.5926, 0.5932, 0.5926, ..., 0.0673, 0.0687, 0.0705], [0.5927, 0.5914, 0.5922, ..., 0.0664, 0.0694, 0.0718]]]], device='cuda:0'), hidden_states=None, attentions=None) ``` `reconstruction`ใ‚’ๅ–ๅพ—ใ—ใ€ใใ‚Œใ‚’่ฆ–่ฆšๅŒ–ใ™ใ‚‹ใŸใ‚ใซๅพŒๅ‡ฆ็†ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ใฉใฎใ‚ˆใ†ใซ่ฆ‹ใˆใ‚‹ใ‹่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ ```python outputs.reconstruction.data.shape # torch.Size([1, 3, 880, 1072]) ``` ๅ‡บๅŠ›ใ‚’ๅœง็ธฎใ—ใฆ่ปธ 0 ใ‚’ๅ‰Š้™คใ—ใ€ๅ€คใ‚’ใ‚ฏใƒชใƒƒใƒ—ใ—ใฆใ‹ใ‚‰ใ€ใใ‚Œใ‚’ numpy float ใซๅค‰ๆ›ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ๆฌกใซใ€่ปธใ‚’ [1072, 880] ใฎๅฝข็Šถใซใชใ‚‹ใ‚ˆใ†ใซ้…็ฝฎใ—ใ€ๆœ€ๅพŒใซๅ‡บๅŠ›ใ‚’็ฏ„ๅ›ฒ [0, 255] ใซๆˆปใ—ใพใ™ใ€‚ ```python import numpy as np # squeeze, take to CPU and clip the values output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy() # rearrange the axes output = np.moveaxis(output, source=0, destination=-1) # bring values back to pixel values range output = (output * 255.0).round().astype(np.uint8) Image.fromarray(output) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat_upscaled.png" alt="Upscaled photo of a cat"/> </div>
transformers/docs/source/ja/tasks/image_to_image.md/0
{ "file_path": "transformers/docs/source/ja/tasks/image_to_image.md", "repo_id": "transformers", "token_count": 2420 }
428
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Zero-shot image classification [[open-in-colab]] ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžใฏใ€ๆฌกใฎใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ—ใฆ็”ปๅƒใ‚’ใ•ใพใ–ใพใชใ‚ซใƒ†ใ‚ดใƒชใซๅˆ†้กžใ™ใ‚‹ใ‚ฟใ‚นใ‚ฏใงใ™ใ€‚ ใ“ใ‚Œใ‚‰ใฎ็‰นๅฎšใฎใ‚ซใƒ†ใ‚ดใƒชใฎใƒฉใƒ™ใƒซไป˜ใใฎไพ‹ใ‚’ๅซใ‚€ใƒ‡ใƒผใ‚ฟใซๅฏพใ—ใฆๆ˜Ž็คบ็š„ใซใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚Œใฆใ„ใชใ„ใ€‚ ๅพ“ๆฅใ€็”ปๅƒๅˆ†้กžใซใฏใ€ใƒฉใƒ™ใƒซไป˜ใ็”ปๅƒใฎ็‰นๅฎšใฎใ‚ปใƒƒใƒˆใงใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใ€ใ“ใฎใƒขใƒ‡ใƒซใฏๆฌกใฎใ“ใจใ‚’ๅญฆ็ฟ’ใ—ใพใ™ใ€‚ ็‰นๅฎšใฎ็”ปๅƒใฎ็‰นๅพดใ‚’ใƒฉใƒ™ใƒซใซใ€Œใƒžใƒƒใƒ”ใƒณใ‚ฐใ€ใ—ใพใ™ใ€‚ๅˆ†้กžใ‚ฟใ‚นใ‚ฏใซใใฎใ‚ˆใ†ใชใƒขใƒ‡ใƒซใ‚’ไฝฟ็”จใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚‹ๅ ดๅˆใ€ ๆ–ฐใ—ใ„ใƒฉใƒ™ใƒซใฎใ‚ปใƒƒใƒˆใงใฏใ€ใƒขใƒ‡ใƒซใ‚’ "ๅ†่ชฟๆ•ด" ใ™ใ‚‹ใŸใ‚ใซๅพฎ่ชฟๆ•ดใŒๅฟ…โ€‹โ€‹่ฆใงใ™ใ€‚ ๅฏพ็…ง็š„ใซใ€ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆใพใŸใฏใ‚ชใƒผใƒ—ใƒณ่ชžๅฝ™็”ปๅƒๅˆ†้กžใƒขใƒ‡ใƒซใฏใ€้€šๅธธใ€ๅคง่ฆๆจกใชใ‚ทใ‚นใƒ†ใƒ ใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚ŒใŸใƒžใƒซใƒใƒขใƒผใƒ€ใƒซ ใƒขใƒ‡ใƒซใงใ™ใ€‚ ็”ปๅƒใจ้–ข้€ฃใ™ใ‚‹่ชฌๆ˜Žใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ€‚ใ“ใ‚Œใ‚‰ใฎใƒขใƒ‡ใƒซใฏใ€ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžใ‚’ๅซใ‚€ๅคšใใฎไธ‹ๆตใ‚ฟใ‚นใ‚ฏใซไฝฟ็”จใงใใ‚‹ใ€่ชฟๆ•ดใ•ใ‚ŒใŸ่ฆ–่ฆš่จ€่ชž่กจ็พใ‚’ๅญฆ็ฟ’ใ—ใพใ™ใ€‚ 
ใ“ใ‚Œใฏใ€็”ปๅƒๅˆ†้กžใซๅฏพใ™ใ‚‹ใ‚ˆใ‚ŠๆŸ”่ปŸใชใ‚ขใƒ—ใƒญใƒผใƒใงใ‚ใ‚Šใ€ใƒขใƒ‡ใƒซใ‚’ๆ–ฐใ—ใ„ใพใ ่ฆ‹ใŸใ“ใจใฎใชใ„ใ‚ซใƒ†ใ‚ดใƒชใซไธ€่ˆฌๅŒ–ใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ™ใ€‚ ่ฟฝๅŠ ใฎใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ ใƒ‡ใƒผใ‚ฟใ‚’ๅฟ…่ฆใจใ›ใšใ€ใƒฆใƒผใ‚ถใƒผใฏใ‚ฟใƒผใ‚ฒใƒƒใƒˆ ใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใฎ่‡ช็”ฑๅฝขๅผใฎใƒ†ใ‚ญใ‚นใƒˆ่ชฌๆ˜Žใ‚’ๅซใ‚€็”ปๅƒใ‚’ใ‚ฏใ‚จใƒชใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ™ใ€‚ ใ“ใฎใ‚ฌใ‚คใƒ‰ใงใฏใ€ๆฌกใฎๆ–นๆณ•ใ‚’ๅญฆใณใพใ™ใ€‚ * ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝœๆˆใ™ใ‚‹ * ๆ‰‹ๅ‹•ใงใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžๆŽจ่ซ–ใ‚’ๅฎŸ่กŒใ—ใพใ™ ๅง‹ใ‚ใ‚‹ๅ‰ใซใ€ๅฟ…่ฆใชใƒฉใ‚คใƒ–ใƒฉใƒชใŒใ™ในใฆใ‚คใƒณใ‚นใƒˆใƒผใƒซใ•ใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ ```bash pip install -q transformers ``` ## Zero-shot image classification pipeline ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžใ‚’ใ‚ตใƒใƒผใƒˆใ™ใ‚‹ใƒขใƒ‡ใƒซใงๆŽจ่ซ–ใ‚’่ฉฆใ™ๆœ€ใ‚‚็ฐกๅ˜ใชๆ–นๆณ•ใฏใ€ๅฏพๅฟœใ™ใ‚‹ [`ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ`] ใ‚’ไฝฟ็”จใ™ใ‚‹ใ“ใจใงใ™ใ€‚ [Hugging Face Hub ใฎใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆ](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads) ใ‹ใ‚‰ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ใ‚คใƒณใ‚นใ‚ฟใƒณใ‚นๅŒ–ใ—ใพใ™ใ€‚ ```python >>> from transformers import pipeline >>> checkpoint = "openai/clip-vit-large-patch14" >>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification") ``` ๆฌกใซใ€ๅˆ†้กžใ—ใŸใ„็”ปๅƒใ‚’้ธๆŠžใ—ใพใ™ใ€‚ ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/owl.jpg" alt="Photo of an owl"/> </div> ็”ปๅƒใจๅ€™่ฃœใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆใฎใƒฉใƒ™ใƒซใ‚’ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใซๆธกใ—ใพใ™ใ€‚ใ“ใ“ใงใฏ็”ปๅƒใ‚’็›ดๆŽฅๆธกใ—ใพใ™ใ€‚ไป–ใฎ้ฉๅˆ‡ใชใ‚ชใƒ—ใ‚ทใƒงใƒณ 
็”ปๅƒใธใฎใƒญใƒผใ‚ซใƒซ ใƒ‘ใ‚นใพใŸใฏ็”ปๅƒ URL ใ‚’ๅซใ‚ใพใ™ใ€‚ ๅ€™่ฃœใƒฉใƒ™ใƒซใฏใ€ใ“ใฎไพ‹ใฎใ‚ˆใ†ใซๅ˜็ด”ใชๅ˜่ชžใซใ™ใ‚‹ใ“ใจใ‚‚ใ€ใ‚ˆใ‚Š่ชฌๆ˜Ž็š„ใชๅ˜่ชžใซใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ ```py >>> predictions = detector(image, candidate_labels=["fox", "bear", "seagull", "owl"]) >>> predictions [{'score': 0.9996670484542847, 'label': 'owl'}, {'score': 0.000199399160919711, 'label': 'seagull'}, {'score': 7.392891711788252e-05, 'label': 'fox'}, {'score': 5.96074532950297e-05, 'label': 'bear'}] ``` ## Zero-shot image classification by hand ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆ็”ปๅƒๅˆ†้กžใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฎไฝฟ็”จๆ–นๆณ•ใ‚’็†่งฃใ—ใŸใจใ“ใ‚ใงใ€ใ‚ผใƒญใ‚ทใƒงใƒƒใƒˆใ‚’ๅฎŸ่กŒใ™ใ‚‹ๆ–นๆณ•ใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ ็”ปๅƒใ‚’ๆ‰‹ๅ‹•ใงๅˆ†้กžใ—ใพใ™ใ€‚ ใพใšใ€[Hugging Face Hub ใฎใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆ](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads) ใ‹ใ‚‰ใƒขใƒ‡ใƒซใจ้–ข้€ฃใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ใƒญใƒผใƒ‰ใ—ใพใ™ใ€‚ ใ“ใ“ใงใฏใ€ๅ‰ใจๅŒใ˜ใƒใ‚งใƒƒใ‚ฏใƒใ‚คใƒณใƒˆใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotImageClassification >>> model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` ๆฐ—ๅˆ†ใ‚’ๅค‰ใˆใฆใ€ๅˆฅใฎ็”ปๅƒใ‚’ๆ’ฎใฃใฆใฟใพใ—ใ‚‡ใ†ใ€‚ ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" alt="Photo of a car"/> </div> ใƒ—ใƒญใ‚ปใƒƒใ‚ตใ‚’ไฝฟ็”จใ—ใฆใƒขใƒ‡ใƒซใฎๅ…ฅๅŠ›ใ‚’ๆบ–ๅ‚™ใ—ใพใ™ใ€‚ใƒ—ใƒญใ‚ปใƒƒใ‚ตใƒผใฏใ€ ใ‚ตใ‚คใ‚บๅค‰ๆ›ดใจๆญฃ่ฆๅŒ–ใซใ‚ˆใ‚‹ใƒขใƒ‡ใƒซใฎ็”ปๅƒใ€ใŠใ‚ˆใณใƒ†ใ‚ญใ‚นใƒˆๅ…ฅๅŠ›ใ‚’ๅ‡ฆ็†ใ™ใ‚‹ใƒˆใƒผใ‚ฏใƒŠใ‚คใ‚ถใƒผใ€‚ ```py >>> 
candidate_labels = ["tree", "car", "bike", "cat"] >>> inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True) ``` ๅ…ฅๅŠ›ใ‚’ใƒขใƒ‡ใƒซใซๆธกใ—ใ€็ตๆžœใ‚’ๅพŒๅ‡ฆ็†ใ—ใพใ™ใ€‚ ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits = outputs.logits_per_image[0] >>> probs = logits.softmax(dim=-1).numpy() >>> scores = probs.tolist() >>> result = [ ... {"score": score, "label": candidate_label} ... for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0]) ... ] >>> result [{'score': 0.998572, 'label': 'car'}, {'score': 0.0010570387, 'label': 'bike'}, {'score': 0.0003393686, 'label': 'tree'}, {'score': 3.1572064e-05, 'label': 'cat'}] ```
transformers/docs/source/ja/tasks/zero_shot_image_classification.md/0
{ "file_path": "transformers/docs/source/ja/tasks/zero_shot_image_classification.md", "repo_id": "transformers", "token_count": 2709 }
429
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ์บ์‹ฑ[[caching]] ๋ˆ„๊ตฐ๊ฐ€์™€ ๋Œ€ํ™”๋ฅผ ๋‚˜๋ˆ„๊ณ  ์žˆ๋Š”๋ฐ, ์ƒ๋Œ€๋ฐฉ์ด ์ด์ „์— ํ–ˆ๋˜ ๋ง์„ ๊ธฐ์–ตํ•˜์ง€ ๋ชปํ•˜๊ณ  ๋‹น์‹ ์ด ๋Œ€๋‹ตํ•  ๋•Œ๋งˆ๋‹ค ์ฒ˜์Œ๋ถ€ํ„ฐ ๋‹ค์‹œ ์‹œ์ž‘ํ•ด์•ผ ํ•œ๋‹ค๊ณ  ์ƒ์ƒํ•ด ๋ณด์„ธ์š”. ์ด๋Š” ๋А๋ฆฌ๊ณ  ๋น„ํšจ์œจ์ ์ด๊ฒ ์ฃ ? ์ด ๋น„์œ ๋ฅผ ํŠธ๋žœ์Šคํฌ๋จธ ๋ชจ๋ธ์—๋„ ์ ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ž๊ธฐํšŒ๊ท€ ๋ชจ๋ธ์˜ ์ƒ์„ฑ์€ ํ•œ ๋ฒˆ์— ํ•˜๋‚˜์˜ ํ† ํฐ์”ฉ ์˜ˆ์ธกํ•˜๊ธฐ ๋•Œ๋ฌธ์— ๋А๋ฆด ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ฐ๊ฐ์˜ ์ƒˆ๋กœ์šด ์˜ˆ์ธก์€ ์ด์ „์˜ ๋ชจ๋“  ๋ฌธ๋งฅ์— ์˜์กดํ•ฉ๋‹ˆ๋‹ค. 1000๋ฒˆ์งธ ํ† ํฐ์„ ์˜ˆ์ธกํ•˜๋ ค๋ฉด, ๋ชจ๋ธ์€ ์ด์ „ 999๊ฐœ ํ† ํฐ์˜ ์ •๋ณด๊ฐ€ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ์ด ์ •๋ณด๋Š” ๊ฐ ํ† ํฐ ํ‘œํ˜„๋“ค ์‚ฌ์ด์˜ ํ–‰๋ ฌ ๊ณฑ์„ ํ†ตํ•ด ํ‘œํ˜„๋ฉ๋‹ˆ๋‹ค. 1001๋ฒˆ์งธ ํ† ํฐ์„ ์˜ˆ์ธกํ•˜๋ ค๋ฉด, ์ด์ „ 999๊ฐœ ํ† ํฐ์˜ ๋™์ผํ•œ ์ •๋ณด์— ๋”ํ•˜์—ฌ 1000๋ฒˆ์งธ ํ† ํฐ์˜ ์ •๋ณด๋„ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ์ด๋ ‡๊ฒŒ ๋˜๋ฉด ํ† ํฐ๋งˆ๋‹ค ๋ชจ๋ธ์€ ๋ฐ˜๋ณต์ ์œผ๋กœ ๋งŽ์€ ํ–‰๋ ฌ ์—ฐ์‚ฐ์„ ์ˆ˜ํ–‰ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค! ์ด๋Ÿฌํ•œ ๋น„ํšจ์œจ์„ฑ์„ ์ œ๊ฑฐํ•˜๊ธฐ ์œ„ํ•ด KV ์บ์‹œ(Key-Value Cache)๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์–ดํ…์…˜ ๋ ˆ์ด์–ด์—์„œ ์ด์ „์— ์ฒ˜๋ฆฌํ•œ ํ† ํฐ์œผ๋กœ๋ถ€ํ„ฐ ์–ป์€ ํ‚ค์™€ ๊ฐ’ ์Œ์„ ์ €์žฅํ•ด๋‘๊ณ , ์ดํ›„ ํ† ํฐ ์˜ˆ์ธก ์‹œ ์ด๋ฅผ ์žฌ์‚ฌ์šฉํ•˜์—ฌ ์—ฐ์‚ฐ์„ ์ค„์ด๋Š” ๋ฐฉ์‹์ž…๋‹ˆ๋‹ค. 
> [!WARNING] > ์บ์‹ฑ์€ **์ถ”๋ก **์—๋งŒ ์‚ฌ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ํ•™์Šต ์ค‘์— ํ™œ์„ฑํ™”๋˜๋ฉด ์˜ˆ์ƒ์น˜ ๋ชปํ•œ ์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์บ์‹ฑ์ด ์–ด๋–ป๊ฒŒ ๊ทธ๋ฆฌ๊ณ  ์™œ ์ž‘๋™ํ•˜๋Š”์ง€ ๋” ์ž˜ ์ดํ•ดํ•˜๊ธฐ ์œ„ํ•ด, ์–ดํ…์…˜ ํ–‰๋ ฌ์˜ ๊ตฌ์กฐ๋ฅผ ์ž์„ธํžˆ ์‚ดํŽด๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ## ์–ดํ…์…˜ ํ–‰๋ ฌ[[attention-matrices]] **์Šค์ผ€์ผ๋“œ ๋‹ท-ํ”„๋กœ๋•ํŠธ ์–ดํ…์…˜**์€ ๋ฐฐ์น˜ ํฌ๊ธฐ `b`, ์–ดํ…์…˜ ํ—ค๋“œ ์ˆ˜ `h`, ํ˜„์žฌ๊นŒ์ง€์˜ ์‹œํ€€์Šค ๊ธธ์ด `T`, ์–ดํ…์…˜ ํ—ค๋“œ๋‹น ์ฐจ์› `d_head`์— ๋Œ€ํ•ด ์•„๋ž˜์™€ ๊ฐ™์ด ๊ณ„์‚ฐ๋ฉ๋‹ˆ๋‹ค. $$ \text{Attention}(Q, K, V) = \text{softmax}\left( \frac{Q K^\top}{\sqrt{d_{\text{head}}}} \times \text{mask} \right) V $$ ์ฟผ๋ฆฌ(`Q`), ํ‚ค(`K`), ๊ฐ’(`V`) ํ–‰๋ ฌ์€ `(b, h, T, d_head)` ํ˜•ํƒœ์˜ ์ž…๋ ฅ ์ž„๋ฒ ๋”ฉ์—์„œ์˜ ํˆฌ์˜์ž…๋‹ˆ๋‹ค. ์ธ๊ณผ์  ์–ดํ…์…˜์˜ ๊ฒฝ์šฐ, ๋งˆ์Šคํฌ๋Š” ๋ชจ๋ธ์ด ๋ฏธ๋ž˜ ํ† ํฐ์— ์–ดํ…์…˜ ํ•˜๋Š” ๊ฒƒ์„ ๋ฐฉ์ง€ํ•ฉ๋‹ˆ๋‹ค. ํ† ํฐ์ด ํ•œ ๋ฒˆ ์ฒ˜๋ฆฌ๋˜๋ฉด, ๊ทธ ํ‘œํ˜„์€ ๋ฏธ๋ž˜ ํ† ํฐ๊ณผ ๊ด€๋ จํ•˜์—ฌ ์ ˆ๋Œ€ ๋ณ€ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์ด๋Š” \\( K_{\text{past}} \\)์™€ \\( V_{\text{past}} \\)๋ฅผ ์บ์‹œํ•˜์—ฌ ๋งˆ์ง€๋ง‰ ํ† ํฐ์˜ ํ‘œํ˜„์„ ๊ณ„์‚ฐํ•˜๋Š” ๋ฐ ์žฌ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Œ์„ ์˜๋ฏธํ•ฉ๋‹ˆ๋‹ค. $$ \text{Attention}(q_t, [\underbrace{k_1, k_2, \dots, k_{t-1}}_{\text{cached}}, k_{t}], [\underbrace{v_1, v_2, \dots, v_{t-1}}_{\text{cached}}, v_{t}]) $$ ์ถ”๋ก  ์‹œ์—๋Š” ๋‹ค์Œ ํ† ํฐ \\( t+1 \\)์„ ์˜ˆ์ธกํ•˜๋Š” ํ‘œํ˜„ \\( x_t \\)๋ฅผ ๊ณ„์‚ฐํ•˜๊ธฐ ์œ„ํ•ด ๋งˆ์ง€๋ง‰ ํ† ํฐ์˜ ์ฟผ๋ฆฌ๋งŒ ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ๋‹จ๊ณ„์—์„œ ์ƒˆ๋กœ์šด ํ‚ค์™€ ๊ฐ’ ๋ฒกํ„ฐ๊ฐ€ ์บ์‹œ์— **์ €์žฅ**๋˜๊ณ  ๊ณผ๊ฑฐ ํ‚ค์™€ ๊ฐ’์— **์ถ”๊ฐ€**๋ฉ๋‹ˆ๋‹ค. $$ K_{\text{cache}} \leftarrow \text{concat}(K_{\text{past}}, k_t), \quad V_{\text{cache}} \leftarrow \text{concat}(V_{\text{past}}, v_t) $$ ์–ดํ…์…˜์€ ๋ชจ๋ธ์˜ ๊ฐ ๋ ˆ์ด์–ด์—์„œ ๋…๋ฆฝ์ ์œผ๋กœ ๊ณ„์‚ฐ๋˜๋ฉฐ, ์บ์‹ฑ์€ ๋ ˆ์ด์–ด๋ณ„๋กœ ์ˆ˜ํ–‰๋ฉ๋‹ˆ๋‹ค. ์บ์‹ฑ์ด ํšจ์œจ์„ฑ์„ ์–ด๋–ป๊ฒŒ ๊ฐœ์„ ํ•˜๋Š”์ง€ ๋น„๊ตํ•œ ์•„๋ž˜ ํ‘œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. 
| ์บ์‹ฑ ์—†์Œ | ์บ์‹ฑ ์‚ฌ์šฉ | |---|---| | ๋‹จ๊ณ„๋งˆ๋‹ค ์ด์ „์˜ ๋ชจ๋“  `K`์™€ `V`๋ฅผ ์žฌ๊ณ„์‚ฐ | ๋‹จ๊ณ„๋งˆ๋‹ค ํ˜„์žฌ์˜ `K`์™€ `V`๋งŒ ๊ณ„์‚ฐ | | ๋‹จ๊ณ„๋‹น ์–ดํ…์…˜ ๋น„์šฉ์ด ์‹œํ€€์Šค ๊ธธ์ด์— ๋Œ€ํ•ด **์ œ๊ณฑ** | ๋‹จ๊ณ„๋‹น ์–ดํ…์…˜ ๋น„์šฉ์ด ์‹œํ€€์Šค ๊ธธ์ด์— ๋Œ€ํ•ด **์„ ํ˜•** (๋ฉ”๋ชจ๋ฆฌ๋Š” ์„ ํ˜•์ ์œผ๋กœ ์ฆ๊ฐ€ํ•˜์ง€๋งŒ, ํ† ํฐ๋‹น ๊ณ„์‚ฐ์€ ๋‚ฎ๊ฒŒ ์œ ์ง€๋จ) | ## ์บ์‹œ ํด๋ž˜์Šค[[cache-class]] ๊ธฐ๋ณธ KV ์บ์‹œ ์ธํ„ฐํŽ˜์ด์Šค๋Š” ํ˜„์žฌ ํ† ํฐ์˜ ํ‚ค์™€ ๊ฐ’ ํ…์„œ๋ฅผ ๋ฐ›์•„์„œ ์—…๋ฐ์ดํŠธ๋œ `K`์™€ `V` ํ…์„œ๋ฅผ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ๋ชจ๋ธ์˜ `forward` ๋ฉ”์†Œ๋“œ์— ์˜ํ•ด ๋‚ด๋ถ€์ ์œผ๋กœ ๊ด€๋ฆฌ๋ฉ๋‹ˆ๋‹ค. ```py new_K, new_V = cache.update(k_t, v_t, layer_idx) attn_output = attn_layer_idx_fn(q_t, new_K, new_V) ``` Transformers์˜ [`Cache`] ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•  ๋•Œ, ์…€ํ”„ ์–ดํ…์…˜ ๋ชจ๋“ˆ์€ ๊ณผ๊ฑฐ์™€ ํ˜„์žฌ ์ •๋ณด๋ฅผ ํ†ตํ•ฉํ•˜๊ธฐ ์œ„ํ•ด ๋ช‡ ๊ฐ€์ง€ ์ค‘์š”ํ•œ ๋‹จ๊ณ„๋ฅผ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค. 1. ์–ดํ…์…˜ ๋ชจ๋“ˆ์€ ํ˜„์žฌ kv ์Œ์„ ์บ์‹œ์— ์ €์žฅ๋œ ๊ณผ๊ฑฐ kv ์Œ๊ณผ ์—ฐ๊ฒฐํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” `(new_tokens_length, past_kv_length + new_tokens_length)` ํ˜•ํƒœ์˜ ์–ดํ…์…˜ ๊ฐ€์ค‘์น˜๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ํ˜„์žฌ์™€ ๊ณผ๊ฑฐ kv ์Œ์ด ๋ณธ์งˆ์ ์œผ๋กœ ๊ฒฐํ•ฉํ•ด ์–ดํ…์…˜ ์ ์ˆ˜๋ฅผ ๊ณ„์‚ฐํ•˜๋ฉฐ, ๋ชจ๋ธ์ด ์ด์ „ ๋ฌธ๋งฅ๊ณผ ํ˜„์žฌ ์ž…๋ ฅ์„ ์ธ์‹ํ•˜๋„๋ก ๋ณด์žฅํ•ฉ๋‹ˆ๋‹ค. 2. `forward` ๋ฉ”์†Œ๋“œ๊ฐ€ ๋ฐ˜๋ณต์ ์œผ๋กœ ํ˜ธ์ถœ๋  ๋•Œ, ์–ดํ…์…˜ ๋งˆ์Šคํฌ ํ˜•ํƒœ๊ฐ€ ๊ณผ๊ฑฐ์™€ ํ˜„์žฌ kv ์Œ์˜ ๊ฒฐํ•ฉ๋œ ๊ธธ์ด์™€ ์ผ์น˜ํ•˜๋Š” ๊ฒƒ์ด ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ์–ดํ…์…˜ ๋งˆ์Šคํฌ๋Š” `(batch_size, past_kv_length + new_tokens_length)` ํ˜•ํƒœ์—ฌ์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ [`~GenerationMixin.generate`]์—์„œ ๋‚ด๋ถ€์ ์œผ๋กœ ์ฒ˜๋ฆฌ๋˜์ง€๋งŒ, [`Cache`]๋กœ ์ž์ฒด ์ƒ์„ฑ ๋ฃจํ”„๋ฅผ ๊ตฌํ˜„ํ•˜๊ณ  ์‹ถ๋‹ค๋ฉด ์ด๋ฅผ ์—ผ๋‘์— ๋‘์„ธ์š”! ์–ดํ…์…˜ ๋งˆ์Šคํฌ๋Š” ๊ณผ๊ฑฐ์™€ ํ˜„์žฌ ํ† ํฐ๊ฐ’์„ ๋ณด์œ ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 3. `cache_position`์„ ์ธ์‹ํ•˜๋Š” ๊ฒƒ๋„ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. 
์ด๋Š” ์œ ํšจํ•œ `cache_position` ๊ฐ’์„ ์ „๋‹ฌํ•ด์•ผ ํ•˜๋ฏ€๋กœ `forward` ๋ฉ”์†Œ๋“œ๋กœ ๋ฏธ๋ฆฌ ์ฑ„์›Œ์ง„ [`Cache`]๋ฅผ ์žฌ์‚ฌ์šฉํ•˜๊ณ  ์‹ถ์„ ๋•Œ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ์‹œํ€€์Šค์—์„œ์˜ ์ž…๋ ฅ ์œ„์น˜๋ฅผ ๋‚˜ํƒ€๋ƒ…๋‹ˆ๋‹ค. `cache_position`์€ ํŒจ๋”ฉ์— ์˜ํ–ฅ๋ฐ›์ง€ ์•Š์œผ๋ฉฐ, ๊ฐ ํ† ํฐ์— ๋Œ€ํ•ด ํ•ญ์ƒ ํ•˜๋‚˜์”ฉ ๋” ๋งŽ์€ ์œ„์น˜๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, kv ์บ์‹œ๊ฐ€ 10๊ฐœ์˜ ํ† ํฐ์„ ํฌํ•จํ•˜๋ฉด - ํŒจ๋“œ ํ† ํฐ๊ณผ ๊ด€๊ณ„์—†์ด - ๋‹ค์Œ ํ† ํฐ์˜ ์บ์‹œ ์œ„์น˜๋Š” `torch.tensor([10])`์ด์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ## ์บ์‹œ ์ €์žฅ์†Œ ๊ตฌํ˜„[[cache-storage-implementation]] ์บ์‹œ๋Š” ๊ฐ ๋ ˆ์ด์–ด๊ฐ€ key์™€ value ์บ์‹œ๋ฅผ ํฌํ•จํ•˜๋Š” ๋ ˆ์ด์–ด ๋ชฉ๋ก ํ˜•ํƒœ๋กœ ๊ตฌ์„ฑ๋˜์–ด ์žˆ์Šต๋‹ˆ๋‹ค. key ๋ฐ value ์บ์‹œ๋Š” `[batch_size, num_heads, seq_len, head_dim]` ํ˜•ํƒœ์˜ ํ…์„œ์ž…๋‹ˆ๋‹ค. ๋ ˆ์ด์–ด๋Š” ์„œ๋กœ ๋‹ค๋ฅธ ํƒ€์ž…์ผ ์ˆ˜ ์žˆ์œผ๋ฉฐ(์˜ˆ: `DynamicLayer`, `StaticLayer`, `SlidingWindowLayer`), ์ด๋Š” ์ฃผ๋กœ ์‹œํ€€์Šค ๊ธธ์ด๋ฅผ ์–ด๋–ป๊ฒŒ ์ฒ˜๋ฆฌํ•˜๊ณ  ์บ์‹œ๋ฅผ ์–ด๋–ป๊ฒŒ ๊ฐฑ์‹ ํ•˜๋Š”์ง€์— ๋”ฐ๋ผ ๋‹ฌ๋ผ์ง‘๋‹ˆ๋‹ค. ๊ฐ€์žฅ ๋‹จ์ˆœํ•œ ํ˜•ํƒœ๋Š” `DynamicLayer`๋กœ, ๋” ๋งŽ์€ ํ† ํฐ์ด ์ฒ˜๋ฆฌ๋จ์— ๋”ฐ๋ผ ์ ์ง„์ ์œผ๋กœ ํ™•์žฅ๋ฉ๋‹ˆ๋‹ค. ์‹œํ€€์Šค ๊ธธ์ด ์ฐจ์›(`seq_len`)์€ ์ƒˆ๋กœ์šด ํ† ํฐ์ด ์ถ”๊ฐ€๋  ๋•Œ๋งˆ๋‹ค ์ฆ๊ฐ€ํ•ฉ๋‹ˆ๋‹ค: ```py cache.layers[idx].keys = torch.cat([cache.layers[idx].keys, key_states], dim=-2) cache.layers[idx].values = torch.cat([cache.layers[idx].values, value_states], dim=-2) ``` `StaticLayer`๋‚˜ `SlidingWindowLayer`์™€ ๊ฐ™์€ ๋‹ค๋ฅธ ๋ ˆ์ด์–ด ํƒ€์ž…์€ ์บ์‹œ๊ฐ€ ์ƒ์„ฑ๋  ๋•Œ ๊ณ ์ •๋œ ์‹œํ€€์Šค ๊ธธ์ด๋ฅผ ๊ฐ€์ง€๋ฉฐ, ์ด๋Š” `torch.compile`๊ณผ ํ˜ธํ™˜๋˜๋„๋ก ๋งŒ๋“ญ๋‹ˆ๋‹ค. `SlidingWindowLayer`์˜ ๊ฒฝ์šฐ, ์ƒˆ๋กœ์šด ํ† ํฐ์ด ์ถ”๊ฐ€๋˜๋ฉด ๊ธฐ์กด ํ† ํฐ์€ ์บ์‹œ์—์„œ ์ œ๊ฑฐ๋ฉ๋‹ˆ๋‹ค. ์•„๋ž˜ ์˜ˆ์ œ๋Š” [`DynamicCache`]๋กœ ์ƒ์„ฑ ๋ฃจํ”„๋ฅผ ๋งŒ๋“œ๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. ๋…ผ์˜๋œ ๋ฐ”์™€ ๊ฐ™์ด, ์–ดํ…์…˜ ๋งˆ์Šคํฌ๋Š” ๊ณผ๊ฑฐ์™€ ํ˜„์žฌ ํ† ํฐ๊ฐ’์˜ ์—ฐ๊ฒฐ์ด๋ฉฐ ๋‹ค์Œ ํ† ํฐ์„ ์œ„ํ•ด ์บ์‹œ ์œ„์น˜์— `1`์ด ์ถ”๊ฐ€๋ฉ๋‹ˆ๋‹ค. 
```py import torch from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache, infer_device device = f"{infer_device()}:0" model_id = "meta-llama/Llama-2-7b-chat-hf" model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=device) tokenizer = AutoTokenizer.from_pretrained(model_id) past_key_values = DynamicCache() messages = [{"role": "user", "content": "Hello, what's your name."}] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device) generated_ids = inputs.input_ids cache_position = torch.arange(inputs.input_ids.shape[1], dtype=torch.int64, device=model.device) max_new_tokens = 10 for _ in range(max_new_tokens): outputs = model(**inputs, cache_position=cache_position, past_key_values=past_key_values, use_cache=True) # ํƒ์š•์  ๊ธฐ๋ฒ•์œผ๋กœ ๋‹ค์Œ ํ† ํฐ ํ•˜๋‚˜๋ฅผ ์ƒ˜ํ”Œ๋ง next_token_ids = outputs.logits[:, -1:].argmax(-1) generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1) # ์ฒ˜๋ฆฌ๋˜์ง€ ์•Š์€ ํ† ํฐ์„ ๋‚จ๊ฒจ๋‘์–ด ๋‹ค์Œ ์ƒ์„ฑ ๋‹จ๊ณ„๋ฅผ ์œ„ํ•œ ์ž…๋ ฅ์„ ์ค€๋น„ํ•ฉ๋‹ˆ๋‹ค. ์šฐ๋ฆฌ์˜ ๊ฒฝ์šฐ ์ƒˆ๋กœ์šด ํ† ํฐ ํ•˜๋‚˜๋งŒ ์กด์žฌํ•ฉ๋‹ˆ๋‹ค. # ์œ„์—์„œ ์„ค๋ช…ํ•œ ๋Œ€๋กœ ์ƒˆ๋กœ์šด ํ† ํฐ์„ ์œ„ํ•ด ์–ดํ…์…˜ ๋งˆ์Šคํฌ๋ฅผ ํ™•์žฅํ•ฉ๋‹ˆ๋‹ค attention_mask = inputs["attention_mask"] attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1) inputs = {"input_ids": next_token_ids, "attention_mask": attention_mask} cache_position = cache_position[-1:] + 1 # ๋‹ค์Œ ํ† ํฐ์„ ์œ„ํ•ด ํ•˜๋‚˜ ๋” ์œ„์น˜ ์ถ”๊ฐ€ print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]) "[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA," ``` ## ์บ์‹œ ์œ„์น˜[[cache-position]] ์บ์‹œ ์œ„์น˜๋Š” ์–ดํ…์…˜ ์บ์‹œ์—์„œ ์ƒˆ๋กœ์šด ํ† ํฐ์„ ์‚ฝ์ž…ํ•  ์œ„์น˜๋ฅผ ์ถ”์ ํ•ฉ๋‹ˆ๋‹ค. 
์ด๋Š” ํŒจ๋”ฉ์ด๋‚˜ ๋ฐฐ์น˜ ๊ตฌ์กฐ์™€ ๋ฌด๊ด€ํ•˜๊ฒŒ ์ปจํ…์ŠคํŠธ ๋‚ด์—์„œ ๊ฐ ํ† ํฐ์˜ ์ ˆ๋Œ€์  ์œ„์น˜๋ฅผ ๋‚˜ํƒ€๋ƒ…๋‹ˆ๋‹ค. ์ด๋ฏธ `N`๊ฐœ์˜ ํ† ํฐ์„ ์บ์‹œํ–ˆ๊ณ  ํ˜„์žฌ `K`๊ฐœ์˜ ์ƒˆ๋กœ์šด ํ† ํฐ์„ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋‹ค๊ณ  ๊ฐ€์ •ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค. ์ƒˆ๋กœ์šด ํ† ํฐ์— ๋Œ€ํ•œ ์บ์‹œ ์œ„์น˜๋Š” `N`๋ถ€ํ„ฐ `N + K - 1`๊นŒ์ง€์˜ ๋ฒ”์œ„๊ฐ€ ๋ฉ๋‹ˆ๋‹ค. ์ฆ‰, `[N, N + 1, N + 2, ..., N + K - 1]` ์œ„์น˜์˜ ํ† ํฐ๋“ค์„ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒƒ์ž…๋‹ˆ๋‹ค. ์บ์‹œ ์œ„์น˜๋Š” ๋‚ด๋ถ€์ ์œผ๋กœ ๋‘ ๊ฐ€์ง€ ๋ชฉ์ ์œผ๋กœ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค: 1. ์ž…๋ ฅ ์‹œํ€€์Šค์—์„œ ์ฒ˜๋ฆฌํ•  ์ƒˆ๋กœ์šด ํ† ํฐ์„ ์„ ํƒํ•˜๊ณ , ์•„์ง ์บ์‹œ๋˜์ง€ ์•Š์€ ํ† ํฐ๋งŒ ๋ชจ๋ธ์˜ `forward`์— ์ „๋‹ฌ๋˜๋„๋ก ๋ณด์žฅํ•ฉ๋‹ˆ๋‹ค. 2. ํ‚ค/๊ฐ’ ์Œ์„ ์บ์‹œ์˜ ์˜ฌ๋ฐ”๋ฅธ ์œ„์น˜์— ์ €์žฅํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ํŠน์ • ์บ์‹œ ๊ธธ์ด๋ฅผ ๋ฏธ๋ฆฌ ํ• ๋‹นํ•˜๋Š” [`StaticCache`]์™€ ๊ฐ™์€ ๊ณ ์ • ํฌ๊ธฐ ์บ์‹œ์—์„œ ํŠนํžˆ ์ค‘์š”ํ•ฉ๋‹ˆ๋‹ค. ์ƒ์„ฑ ๋ฃจํ”„๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ ์บ์‹œ ์œ„์น˜๋ฅผ ๊ด€๋ฆฌํ•˜์ง€๋งŒ, ์‚ฌ์šฉ์ž ์ •์˜ ์ƒ์„ฑ ๋ฉ”์†Œ๋“œ๋ฅผ ์ž‘์„ฑํ•  ๋•Œ๋Š” ์บ์‹œ ์œ„์น˜๊ฐ€ ์ •ํ™•ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์บ์‹œ ์œ„์น˜๋Š” ๊ณ ์ •๋œ ์Šฌ๋กฏ์— ํ‚ค/๊ฐ’ ์ƒํƒœ๋ฅผ ์ฝ๊ณ  ์“ฐ๋Š” ๋ฐ ์‚ฌ์šฉ๋˜๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. ```py import torch from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache, infer_device device = f"{infer_device()}:0" model_id = "meta-llama/Llama-2-7b-chat-hf" model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=device) tokenizer = AutoTokenizer.from_pretrained(model_id) messages = [{"role": "user", "content": "You are a helpful assistant."}] inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device) generated_ids = model.generate(**inputs, use_cache=True, max_new_tokens=10) ``` ## ๋ ˆ๊ฑฐ์‹œ ์บ์‹œ ํ˜•์‹[[legacy-cache-format]] [`Cache`] ํด๋ž˜์Šค ์ด์ „์—๋Š” ์บ์‹œ๊ฐ€ ํ…์„œ์˜ ํŠœํ”Œ์˜ ํŠœํ”Œ๋กœ ์ €์žฅ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. 
์ด ํ˜•์‹์€ ํ…์ŠคํŠธ๊ฐ€ ์ƒ์„ฑ๋จ์— ๋”ฐ๋ผ ์ฆ๊ฐ€ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ๋™์ ์ด๋ฉฐ, [`DynamicCache`]์™€ ์œ ์‚ฌํ•ฉ๋‹ˆ๋‹ค. ๋ ˆ๊ฑฐ์‹œ ํ˜•์‹์€ ๋ณธ์งˆ์ ์œผ๋กœ ๋™์ผํ•œ ๋ฐ์ดํ„ฐ ๊ตฌ์กฐ์ด์ง€๋งŒ ๋‹ค๋ฅด๊ฒŒ ์กฐ์งํ™”๋˜์—ˆ์Šต๋‹ˆ๋‹ค. - ๊ฐ ๋‚ด๋ถ€ ํŠœํ”Œ์€ ๋ ˆ์ด์–ด์˜ ํ‚ค์™€ ๊ฐ’ ํ…์„œ๋ฅผ ํฌํ•จํ•˜๋Š” ํŠœํ”Œ์˜ ํŠœํ”Œ์ž…๋‹ˆ๋‹ค. - ํ…์„œ๋Š” ๋™์ผํ•œ ํ˜•ํƒœ `[batch_size, num_heads, seq_len, head_dim]`๋ฅผ ๊ฐ–์Šต๋‹ˆ๋‹ค. - ์ด ํ˜•์‹์€ ๋œ ์œ ์—ฐํ•˜๋ฉฐ ์–‘์žํ™”๋‚˜ ์˜คํ”„๋กœ๋”ฉ๊ณผ ๊ฐ™์€ ๊ธฐ๋Šฅ์„ ์ง€์›ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ํ”„๋กœ์ ํŠธ๊ฐ€ ์ด ๋ ˆ๊ฑฐ์‹œ ํ˜•์‹์— ์˜์กดํ•œ๋‹ค๋ฉด, [`~DynamicCache.from_legacy_cache`]๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ [`DynamicCache`]๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๊ฒƒ์„ ๊ถŒ์žฅํ•ฉ๋‹ˆ๋‹ค. ๋ ˆ๊ฑฐ์‹œ ์บ์‹œ ํ˜•์‹์€ ์‚ฌ์šฉ์ด ์ค‘๋‹จ๋˜์—ˆ์œผ๋ฉฐ `Transformers`์—์„œ ๋” ์ด์ƒ ์‚ฌ์šฉ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ํŠน์ • ํ˜•์‹์—์„œ ์บ์‹œ๋ฅผ ์กฐ์ž‘ํ•˜๋Š” ์ปค์Šคํ…€ ๋กœ์ง์ด ์žˆ๋Š” ๊ฒฝ์šฐ ๋„์›€์ด ๋˜๋Š” [`DynamicCache.to_legacy_cache`] ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํŠœํ”Œ ํ˜•์‹์œผ๋กœ ๋‹ค์‹œ ๋ณ€ํ™˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ```py import torch from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", dtype=torch.float16, device_map="auto") inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device) # ์บ์‹œ๋ฅผ ๋ฐ˜ํ™˜ํ•˜๋ ค๋ฉด `return_dict_in_generate=True`๊ฐ€ ํ•„์š”ํ•˜๊ณ  `return_legacy_cache`๋Š” ๋ฐ˜ํ™˜๋œ ์บ์‹œ๋ฅผ # ๋ ˆ๊ฑฐ์‹œ ํ˜•์‹์œผ๋กœ ๊ฐ•์ œํ•ฉ๋‹ˆ๋‹ค generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5) cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values) legacy_format_cache = cache.to_legacy_cache() ```
transformers/docs/source/ko/cache_explanation.md/0
{ "file_path": "transformers/docs/source/ko/cache_explanation.md", "repo_id": "transformers", "token_count": 9076 }
430
<!--โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ์—ด์‹ฌํžˆ ๋ฒˆ์—ญ ์ค‘์ž…๋‹ˆ๋‹ค. ์กฐ๊ธˆ ์ด๋”ฐ ๋งŒ๋‚˜์š”!
transformers/docs/source/ko/in_translation.md/0
{ "file_path": "transformers/docs/source/ko/in_translation.md", "repo_id": "transformers", "token_count": 95 }
431
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white" > <img alt= "TensorFlow" src= "https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white" > <img alt= "Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?styleโ€ฆNu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC"> <img alt="SDPA" src= "https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white" > </div> </div> # ALBERT[[albert]] [ALBERT](https://huggingface.co/papers/1909.11942)๋Š” [BERT](./bert)์˜ ํ™•์žฅ์„ฑ๊ณผ ํ•™์Šต ์‹œ ๋ฉ”๋ชจ๋ฆฌ ํ•œ๊ณ„๋ฅผ ํ•ด๊ฒฐํ•˜๊ธฐ ์œ„ํ•ด ์„ค๊ณ„๋œ ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค. ์ด ๋ชจ๋ธ์€ ๋‘ ๊ฐ€์ง€ ํŒŒ๋ผ๋ฏธํ„ฐ ๊ฐ์†Œ ๊ธฐ๋ฒ•์„ ๋„์ž…ํ•ฉ๋‹ˆ๋‹ค. ์ฒซ ๋ฒˆ์งธ๋Š” ์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ ๋ถ„ํ•ด(factorized embedding parametrization)๋กœ, ํฐ ์–ดํœ˜ ์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ์„ ๋‘ ๊ฐœ์˜ ์ž‘์€ ํ–‰๋ ฌ๋กœ ๋ถ„ํ•ดํ•˜์—ฌ ํžˆ๋“  ์‚ฌ์ด์ฆˆ๋ฅผ ๋Š˜๋ ค๋„ ํŒŒ๋ผ๋ฏธํ„ฐ ์ˆ˜๊ฐ€ ํฌ๊ฒŒ ์ฆ๊ฐ€ํ•˜์ง€ ์•Š๋„๋ก ํ•ฉ๋‹ˆ๋‹ค. ๋‘ ๋ฒˆ์งธ๋Š” ๊ณ„์ธต ๊ฐ„ ํŒŒ๋ผ๋ฏธํ„ฐ ๊ณต์œ (cross-layer parameter sharing)๋กœ, ์—ฌ๋Ÿฌ ๊ณ„์ธต์ด ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ๊ณต์œ ํ•˜์—ฌ ํ•™์Šตํ•ด์•ผ ํ•  ํŒŒ๋ผ๋ฏธํ„ฐ ์ˆ˜๋ฅผ ์ค„์ž…๋‹ˆ๋‹ค. 
ALBERT๋Š” BERT์—์„œ ๋ฐœ์ƒํ•˜๋Š” GPU/TPU ๋ฉ”๋ชจ๋ฆฌ ํ•œ๊ณ„, ๊ธด ํ•™์Šต ์‹œ๊ฐ„, ๊ฐ‘์ž‘์Šค๋Ÿฐ ์„ฑ๋Šฅ ์ €ํ•˜ ๋ฌธ์ œ๋ฅผ ํ•ด๊ฒฐํ•˜๊ธฐ ์œ„ํ•ด ๋งŒ๋“ค์–ด์กŒ์Šต๋‹ˆ๋‹ค. ALBERT๋Š” ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์ค„์ด๊ธฐ ์œ„ํ•ด ๋‘ ๊ฐ€์ง€ ๊ธฐ๋ฒ•์„ ์‚ฌ์šฉํ•˜์—ฌ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰์„ ์ค„์ด๊ณ  BERT์˜ ํ•™์Šต ์†๋„๋ฅผ ๋†’์ž…๋‹ˆ๋‹ค: - **์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ ๋ถ„ํ•ด:** ํฐ ์–ดํœ˜ ์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ์„ ๋‘ ๊ฐœ์˜ ๋” ์ž‘์€ ํ–‰๋ ฌ๋กœ ๋ถ„ํ•ดํ•˜์—ฌ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰์„ ์ค„์ž…๋‹ˆ๋‹ค. - **๊ณ„์ธต ๊ฐ„ ํŒŒ๋ผ๋ฏธํ„ฐ ๊ณต์œ :** ๊ฐ ํŠธ๋žœ์Šคํฌ๋จธ ๊ณ„์ธต๋งˆ๋‹ค ๋ณ„๋„์˜ ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ํ•™์Šตํ•˜๋Š” ๋Œ€์‹ , ์—ฌ๋Ÿฌ ๊ณ„์ธต์ด ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ๊ณต์œ ํ•˜์—ฌ ํ•™์Šตํ•ด์•ผ ํ•  ๊ฐ€์ค‘์น˜ ์ˆ˜๋ฅผ ๋”์šฑ ์ค„์ž…๋‹ˆ๋‹ค. ALBERT๋Š” BERT์™€ ๋งˆ์ฐฌ๊ฐ€์ง€๋กœ ์ ˆ๋Œ€ ์œ„์น˜ ์ž„๋ฒ ๋”ฉ(absolute position embeddings)์„ ์‚ฌ์šฉํ•˜๋ฏ€๋กœ, ์ž…๋ ฅ ํŒจ๋”ฉ์€ ์˜ค๋ฅธ์ชฝ์— ์ ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์ž„๋ฒ ๋”ฉ ํฌ๊ธฐ๋Š” 128์ด๋ฉฐ, BERT์˜ 768๋ณด๋‹ค ์ž‘์Šต๋‹ˆ๋‹ค. ALBERT๋Š” ํ•œ ๋ฒˆ์— ์ตœ๋Œ€ 512๊ฐœ์˜ ํ† ํฐ์„ ์ฒ˜๋ฆฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ชจ๋“  ๊ณต์‹ ALBERT ์ฒดํฌํฌ์ธํŠธ๋Š” [ALBERT ์ปค๋ฎค๋‹ˆํ‹ฐ](https://huggingface.co/albert) ์กฐ์ง์—์„œ ํ™•์ธํ•˜์‹ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. > [!TIP] > ์˜ค๋ฅธ์ชฝ ์‚ฌ์ด๋“œ๋ฐ”์˜ ALBERT ๋ชจ๋ธ์„ ํด๋ฆญํ•˜์‹œ๋ฉด ๋‹ค์–‘ํ•œ ์–ธ์–ด ์ž‘์—…์— ALBERT๋ฅผ ์ ์šฉํ•˜๋Š” ์˜ˆ์‹œ๋ฅผ ๋” ํ™•์ธํ•˜์‹ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์•„๋ž˜ ์˜ˆ์‹œ๋Š” [`Pipeline`], [`AutoModel`] ๊ทธ๋ฆฌ๊ณ  ์ปค๋งจ๋“œ๋ผ์ธ์—์„œ `[MASK]` ํ† ํฐ์„ ์˜ˆ์ธกํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค. 
<hfoptions id="usage"> <hfoption id="Pipeline"> ```py import torch from transformers import pipeline pipeline = pipeline( task="fill-mask", model="albert-base-v2", dtype=torch.float16, device=0 ) pipeline("์‹๋ฌผ์€ ๊ด‘ํ•ฉ์„ฑ์ด๋ผ๊ณ  ์•Œ๋ ค์ง„ ๊ณผ์ •์„ ํ†ตํ•ด [MASK]๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.", top_k=5) ``` </hfoption> <hfoption id="AutoModel"> ```py import torch from transformers import AutoModelForMaskedLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2") model = AutoModelForMaskedLM.from_pretrained( "albert/albert-base-v2", dtype=torch.float16, attn_implementation="sdpa", device_map="auto" ) prompt = "์‹๋ฌผ์€ [MASK]์ด๋ผ๊ณ  ์•Œ๋ ค์ง„ ๊ณผ์ •์„ ํ†ตํ•ด ์—๋„ˆ์ง€๋ฅผ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค." inputs = tokenizer(prompt, return_tensors="pt").to(model.device) with torch.no_grad(): outputs = model(**inputs) mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] predictions = outputs.logits[0, mask_token_index] top_k = torch.topk(predictions, k=5).indices.tolist() for token_id in top_k[0]: print(f"์˜ˆ์ธก: {tokenizer.decode([token_id])}") ``` </hfoption> <hfoption id="transformers CLI"> ```bash echo -e "Plants create [MASK] through a process known as photosynthesis." | transformers run --task fill-mask --model albert-base-v2 --device 0 ``` </hfoption> </hfoptions> ## ์ฐธ๊ณ  ์‚ฌํ•ญ[[notes]] - BERT๋Š” ์ ˆ๋Œ€ ์œ„์น˜ ์ž„๋ฒ ๋”ฉ์„ ์‚ฌ์šฉํ•˜๋ฏ€๋กœ, ์˜ค๋ฅธ์ชฝ์— ์ž…๋ ฅ์ด ํŒจ๋”ฉ๋ผ์•ผ ํ•ฉ๋‹ˆ๋‹ค. - ์ž„๋ฒ ๋”ฉ ํฌ๊ธฐ `E`๋Š” ํžˆ๋“  ํฌ๊ธฐ `H`์™€ ๋‹ค๋ฆ…๋‹ˆ๋‹ค. ์ž„๋ฒ ๋”ฉ์€ ๋ฌธ๋งฅ์— ๋…๋ฆฝ์ (๊ฐ ํ† ํฐ๋งˆ๋‹ค ํ•˜๋‚˜์˜ ์ž„๋ฒ ๋”ฉ ๋ฒกํ„ฐ)์ด๊ณ , ์€๋‹‰ ์ƒํƒœ๋Š” ๋ฌธ๋งฅ์— ์˜์กด์ (ํ† ํฐ ์‹œํ€€์Šค๋งˆ๋‹ค ํ•˜๋‚˜์˜ ์€๋‹‰ ์ƒํƒœ)์ž…๋‹ˆ๋‹ค. ์ž„๋ฒ ๋”ฉ ํ–‰๋ ฌ์€ `V x E`(V: ์–ดํœ˜ ํฌ๊ธฐ)์ด๋ฏ€๋กœ, ์ผ๋ฐ˜์ ์œผ๋กœ `H >> E`๊ฐ€ ๋” ๋…ผ๋ฆฌ์ ์ž…๋‹ˆ๋‹ค. `E < H`์ผ ๋•Œ ๋ชจ๋ธ ํŒŒ๋ผ๋ฏธํ„ฐ๊ฐ€ ๋” ์ ์–ด์ง‘๋‹ˆ๋‹ค. 
## ์ฐธ๊ณ  ์ž๋ฃŒ[[resources]] ์•„๋ž˜ ์„น์…˜์˜ ์ž๋ฃŒ๋“ค์€ ๊ณต์‹ Hugging Face ๋ฐ ์ปค๋ฎค๋‹ˆํ‹ฐ(๐ŸŒŽ ํ‘œ์‹œ) ์ž๋ฃŒ๋กœ, AlBERT๋ฅผ ์‹œ์ž‘ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋ฉ๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์— ์ถ”๊ฐ€ํ•  ์ž๋ฃŒ๊ฐ€ ์žˆ๋‹ค๋ฉด Pull Request๋ฅผ ๋ณด๋‚ด์ฃผ์„ธ์š”! ๊ธฐ์กด ์ž๋ฃŒ์™€ ์ค‘๋ณต๋˜์ง€ ์•Š๊ณ  ์ƒˆ๋กœ์šด ๋‚ด์šฉ์„ ๋‹ด๊ณ  ์žˆ์œผ๋ฉด ์ข‹์Šต๋‹ˆ๋‹ค. <PipelineTag pipeline="text-classification"/> - [`AlbertForSequenceClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`TFAlbertForSequenceClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`FlaxAlbertForSequenceClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [ํ…์ŠคํŠธ ๋ถ„๋ฅ˜ ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/sequence_classification)์—์„œ ๋ชจ๋ธ ์‚ฌ์šฉ๋ฒ•์„ ํ™•์ธํ•˜์„ธ์š”. <PipelineTag pipeline="token-classification"/> - [`AlbertForTokenClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`TFAlbertForTokenClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`FlaxAlbertForTokenClassification`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. 
- ๐Ÿค— Hugging Face์˜ [ํ† ํฐ ๋ถ„๋ฅ˜](https://huggingface.co/course/chapter7/2?fw=pt) ๊ฐ•์ขŒ - [ํ† ํฐ ๋ถ„๋ฅ˜ ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/token_classification)์—์„œ ๋ชจ๋ธ ์‚ฌ์šฉ๋ฒ•์„ ํ™•์ธํ•˜์„ธ์š”. <PipelineTag pipeline="fill-mask"/> - [`AlbertForMaskedLM`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`TFAlbertForMaskedLM`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`FlaxAlbertForMaskedLM`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - ๐Ÿค— Hugging Face์˜ [๋งˆ์Šคํ‚น ์–ธ์–ด ๋ชจ๋ธ๋ง](https://huggingface.co/course/chapter7/3?fw=pt) ๊ฐ•์ขŒ - [๋งˆ์Šคํ‚น ์–ธ์–ด ๋ชจ๋ธ๋ง ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/masked_language_modeling)์—์„œ ๋ชจ๋ธ ์‚ฌ์šฉ๋ฒ•์„ ํ™•์ธํ•˜์„ธ์š”. <PipelineTag pipeline="question-answering"/> - [`AlbertForQuestionAnswering`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. 
- [`TFAlbertForQuestionAnswering`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`FlaxAlbertForQuestionAnswering`]์€ ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [์งˆ์˜์‘๋‹ต](https://huggingface.co/course/chapter7/7?fw=pt) ๐Ÿค— Hugging Face ๊ฐ•์ขŒ์˜ ์ฑ•ํ„ฐ. - [์งˆ์˜์‘๋‹ต ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/question_answering)์—์„œ ๋ชจ๋ธ ์‚ฌ์šฉ๋ฒ•์„ ํ™•์ธํ•˜์„ธ์š”. **๋‹ค์ค‘ ์„ ํƒ(Multiple choice)** - [`AlbertForMultipleChoice`]๋Š” ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [`TFAlbertForMultipleChoice`]๋Š” ์ด [์˜ˆ์ œ ์Šคํฌ๋ฆฝํŠธ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice)์™€ [๋…ธํŠธ๋ถ](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. - [๋‹ค์ค‘ ์„ ํƒ ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/multiple_choice)์—์„œ ๋ชจ๋ธ ์‚ฌ์šฉ๋ฒ•์„ ํ™•์ธํ•˜์„ธ์š”. 
## AlbertConfig[[albertconfig]] [[autodoc]] AlbertConfig ## AlbertTokenizer[[alberttokenizer]] [[autodoc]] AlbertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## AlbertTokenizerFast[[alberttokenizerfast]] [[autodoc]] AlbertTokenizerFast ## Albert ํŠนํ™” ์ถœ๋ ฅ[[albert-specific-outputs]] [[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput [[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput <frameworkcontent> <pt> ## AlbertModel[[albertmodel]] [[autodoc]] AlbertModel - forward ## AlbertForPreTraining[[albertforpretraining]] [[autodoc]] AlbertForPreTraining - forward ## AlbertForMaskedLM[[albertformaskedlm]] [[autodoc]] AlbertForMaskedLM - forward ## AlbertForSequenceClassification[[albertforsequenceclassification]] [[autodoc]] AlbertForSequenceClassification - forward ## AlbertForMultipleChoice[[albertformultiplechoice]] [[autodoc]] AlbertForMultipleChoice ## AlbertForTokenClassification[[albertfortokenclassification]] [[autodoc]] AlbertForTokenClassification - forward ## AlbertForQuestionAnswering[[albertforquestionanswering]] [[autodoc]] AlbertForQuestionAnswering - forward </pt> <tf> ## TFAlbertModel[[tfalbertmodel]] [[autodoc]] TFAlbertModel - call ## TFAlbertForPreTraining[[tfalbertforpretraining]] [[autodoc]] TFAlbertForPreTraining - call ## TFAlbertForMaskedLM[[tfalbertformaskedlm]] [[autodoc]] TFAlbertForMaskedLM - call ## TFAlbertForSequenceClassification[[tfalbertforsequenceclassification]] [[autodoc]] TFAlbertForSequenceClassification - call ## TFAlbertForMultipleChoice[[tfalbertformultiplechoice]] [[autodoc]] TFAlbertForMultipleChoice - call ## TFAlbertForTokenClassification[[tfalbertfortokenclassification]] [[autodoc]] TFAlbertForTokenClassification - call ## TFAlbertForQuestionAnswering[[tfalbertforquestionanswering]] [[autodoc]] TFAlbertForQuestionAnswering - call </tf> <jax> ## FlaxAlbertModel[[flaxalbertmodel]] [[autodoc]] 
FlaxAlbertModel - __call__ ## FlaxAlbertForPreTraining[[flaxalbertforpretraining]] [[autodoc]] FlaxAlbertForPreTraining - __call__ ## FlaxAlbertForMaskedLM[[flaxalbertformaskedlm]] [[autodoc]] FlaxAlbertForMaskedLM - __call__ ## FlaxAlbertForSequenceClassification[[flaxalbertforsequenceclassification]] [[autodoc]] FlaxAlbertForSequenceClassification - __call__ ## FlaxAlbertForMultipleChoice[[flaxalbertformultiplechoice]] [[autodoc]] FlaxAlbertForMultipleChoice - __call__ ## FlaxAlbertForTokenClassification[[flaxalbertfortokenclassification]] [[autodoc]] FlaxAlbertForTokenClassification - __call__ ## FlaxAlbertForQuestionAnswering[[flaxalbertforquestionanswering]] [[autodoc]] FlaxAlbertForQuestionAnswering - __call__ </jax> </frameworkcontent>
transformers/docs/source/ko/model_doc/albert.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/albert.md", "repo_id": "transformers", "token_count": 6765 }
432
# Cohere[[cohere]] ## ๊ฐœ์š”[[overview]] The Cohere Command-R ๋ชจ๋ธ์€ CohereํŒ€์ด [Command-R: ํ”„๋กœ๋•์…˜ ๊ทœ๋ชจ์˜ ๊ฒ€์ƒ‰ ์ฆ๊ฐ• ์ƒ์„ฑ](https://txt.cohere.com/command-r/)๋ผ๋Š” ๋ธ”๋กœ๊ทธ ํฌ์ŠคํŠธ์—์„œ ์†Œ๊ฐœ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ๋…ผ๋ฌธ ์ดˆ๋ก: *Command-R์€ ๊ธฐ์—…์˜ ํ”„๋กœ๋•์…˜ ๊ทœ๋ชจ AI๋ฅผ ๊ฐ€๋Šฅํ•˜๊ฒŒ ํ•˜๊ธฐ ์œ„ํ•ด RAG(๊ฒ€์ƒ‰ ์ฆ๊ฐ• ์ƒ์„ฑ)์™€ ๋„๊ตฌ ์‚ฌ์šฉ์„ ๋ชฉํ‘œ๋กœ ํ•˜๋Š” ํ™•์žฅ ๊ฐ€๋Šฅํ•œ ์ƒ์„ฑ ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค. ์˜ค๋Š˜ ์šฐ๋ฆฌ๋Š” ๋Œ€๊ทœ๋ชจ ํ”„๋กœ๋•์…˜ ์›Œํฌ๋กœ๋“œ๋ฅผ ๋ชฉํ‘œ๋กœ ํ•˜๋Š” ์ƒˆ๋กœ์šด LLM์ธ Command-R์„ ์†Œ๊ฐœํ•ฉ๋‹ˆ๋‹ค. Command-R์€ ๋†’์€ ํšจ์œจ์„ฑ๊ณผ ๊ฐ•๋ ฅํ•œ ์ •ํ™•์„ฑ์˜ ๊ท ํ˜•์„ ๋งž์ถ”๋Š” 'ํ™•์žฅ ๊ฐ€๋Šฅํ•œ' ๋ชจ๋ธ ์นดํ…Œ๊ณ ๋ฆฌ๋ฅผ ๋Œ€์ƒ์œผ๋กœ ํ•˜์—ฌ, ๊ธฐ์—…๋“ค์ด ๊ฐœ๋… ์ฆ๋ช…์„ ๋„˜์–ด ํ”„๋กœ๋•์…˜ ๋‹จ๊ณ„๋กœ ๋‚˜์•„๊ฐˆ ์ˆ˜ ์žˆ๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค.* *Command-R์€ ๊ฒ€์ƒ‰ ์ฆ๊ฐ• ์ƒ์„ฑ(RAG)์ด๋‚˜ ์™ธ๋ถ€ API ๋ฐ ๋„๊ตฌ ์‚ฌ์šฉ๊ณผ ๊ฐ™์€ ๊ธด ๋ฌธ๋งฅ ์ž‘์—…์— ์ตœ์ ํ™”๋œ ์ƒ์„ฑ ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค. ์ด ๋ชจ๋ธ์€ RAG ์• ํ”Œ๋ฆฌ์ผ€์ด์…˜์„ ์œ„ํ•œ ์ตœ๊ณ  ์ˆ˜์ค€์˜ ํ†ตํ•ฉ์„ ์ œ๊ณตํ•˜๊ณ  ๊ธฐ์—… ์‚ฌ์šฉ ์‚ฌ๋ก€์—์„œ ๋›ฐ์–ด๋‚œ ์„ฑ๋Šฅ์„ ๋ฐœํœ˜ํ•˜๊ธฐ ์œ„ํ•ด ์šฐ๋ฆฌ์˜ ์—…๊ณ„ ์„ ๋„์ ์ธ Embed ๋ฐ Rerank ๋ชจ๋ธ๊ณผ ์กฐํ™”๋กญ๊ฒŒ ์ž‘๋™ํ•˜๋„๋ก ์„ค๊ณ„๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ๊ธฐ์—…์ด ๋Œ€๊ทœ๋ชจ๋กœ ๊ตฌํ˜„ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋งŒ๋“ค์–ด์ง„ ๋ชจ๋ธ๋กœ์„œ, Command-R์€ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ํŠน์ง•์„ ์ž๋ž‘ํ•ฉ๋‹ˆ๋‹ค: - RAG ๋ฐ ๋„๊ตฌ ์‚ฌ์šฉ์— ๋Œ€ํ•œ ๊ฐ•๋ ฅํ•œ ์ •ํ™•์„ฑ - ๋‚ฎ์€ ์ง€์—ฐ ์‹œ๊ฐ„๊ณผ ๋†’์€ ์ฒ˜๋ฆฌ๋Ÿ‰ - ๋” ๊ธด 128k ์ปจํ…์ŠคํŠธ์™€ ๋‚ฎ์€ ๊ฐ€๊ฒฉ - 10๊ฐœ์˜ ์ฃผ์š” ์–ธ์–ด์— ๊ฑธ์นœ ๊ฐ•๋ ฅํ•œ ๊ธฐ๋Šฅ - ์—ฐ๊ตฌ ๋ฐ ํ‰๊ฐ€๋ฅผ ์œ„ํ•ด HuggingFace์—์„œ ์‚ฌ์šฉ ๊ฐ€๋Šฅํ•œ ๋ชจ๋ธ ๊ฐ€์ค‘์น˜ ๋ชจ๋ธ ์ฒดํฌํฌ์ธํŠธ๋Š” [์ด๊ณณ](https://huggingface.co/CohereForAI/c4ai-command-r-v01)์—์„œ ํ™•์ธํ•˜์„ธ์š”. ์ด ๋ชจ๋ธ์€ [Saurabh Dash](https://huggingface.co/saurabhdash)๊ณผ [Ahmet รœstรผn](https://huggingface.co/ahmetustun)์— ์˜ํ•ด ๊ธฐ์—ฌ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. Hugging Face์—์„œ ์ด ์ฝ”๋“œ์˜ ๊ตฌํ˜„์€ [GPT-NeoX](https://github.com/EleutherAI/gpt-neox)์— ๊ธฐ๋ฐ˜ํ•˜์˜€์Šต๋‹ˆ๋‹ค. 
## ์‚ฌ์šฉ ํŒ[[usage-tips]] <Tip warning={true}> Hub์— ์—…๋กœ๋“œ๋œ ์ฒดํฌํฌ์ธํŠธ๋“ค์€ `dtype = 'float16'`์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” `AutoModel` API๊ฐ€ ์ฒดํฌํฌ์ธํŠธ๋ฅผ `torch.float32`์—์„œ `torch.float16`์œผ๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. ์˜จ๋ผ์ธ ๊ฐ€์ค‘์น˜์˜ `dtype`์€ `model = AutoModelForCausalLM.from_pretrained("path", dtype = "auto")`๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ์ดˆ๊ธฐํ™”ํ•  ๋•Œ `dtype="auto"`๋ฅผ ์‚ฌ์šฉํ•˜์ง€ ์•Š๋Š” ํ•œ ๋Œ€๋ถ€๋ถ„ ๋ฌด๊ด€ํ•ฉ๋‹ˆ๋‹ค. ๊ทธ ์ด์œ ๋Š” ๋ชจ๋ธ์ด ๋จผ์ € ๋‹ค์šด๋กœ๋“œ๋˜๊ณ (์˜จ๋ผ์ธ ์ฒดํฌํฌ์ธํŠธ์˜ `dtype` ์‚ฌ์šฉ), ๊ทธ ๋‹ค์Œ `torch`์˜ ๊ธฐ๋ณธ `dtype`์œผ๋กœ ๋ณ€ํ™˜๋˜๋ฉฐ(์ด๋•Œ `torch.float32`๊ฐ€ ๋จ), ๋งˆ์ง€๋ง‰์œผ๋กœ config์— `dtype`์ด ์ œ๊ณต๋œ ๊ฒฝ์šฐ ์ด๋ฅผ ์‚ฌ์šฉํ•˜๊ธฐ ๋•Œ๋ฌธ์ž…๋‹ˆ๋‹ค. ๋ชจ๋ธ์„ `float16`์œผ๋กœ ํ›ˆ๋ จํ•˜๋Š” ๊ฒƒ์€ ๊ถŒ์žฅ๋˜์ง€ ์•Š์œผ๋ฉฐ `nan`์„ ์ƒ์„ฑํ•˜๋Š” ๊ฒƒ์œผ๋กœ ์•Œ๋ ค์ ธ ์žˆ์Šต๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ ๋ชจ๋ธ์€ `bfloat16`์œผ๋กœ ํ›ˆ๋ จํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. </Tip> ๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ €๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ๋กœ๋“œํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python # pip install transformers from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "CohereForAI/c4ai-command-r-v01" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # Format message with the command-r chat template messages = [{"role": "user", "content": "Hello, how are you?"}] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` - Flash Attention 2๋ฅผ `attn_implementation="flash_attention_2"`๋ฅผ ํ†ตํ•ด ์‚ฌ์šฉํ•  ๋•Œ๋Š”, `from_pretrained` ํด๋ž˜์Šค ๋ฉ”์„œ๋“œ์— `dtype`์„ ์ „๋‹ฌํ•˜์ง€ ๋ง๊ณ  ์ž๋™ ํ˜ผํ•ฉ ์ •๋ฐ€๋„ ํ›ˆ๋ จ(Automatic 
Mixed-Precision training)์„ ์‚ฌ์šฉํ•˜์„ธ์š”. `Trainer`๋ฅผ ์‚ฌ์šฉํ•  ๋•Œ๋Š” ๋‹จ์ˆœํžˆ `fp16` ๋˜๋Š” `bf16`์„ `True`๋กœ ์ง€์ •ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค. ๊ทธ๋ ‡์ง€ ์•Š์€ ๊ฒฝ์šฐ์—๋Š” `torch.autocast`๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”. ์ด๋Š” Flash Attention์ด `fp16`์™€ `bf16` ๋ฐ์ดํ„ฐ ํƒ€์ž…๋งŒ ์ง€์›ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ํ•„์š”ํ•ฉ๋‹ˆ๋‹ค. ## ๋ฆฌ์†Œ์Šค[[resources]] Command-R์„ ์‹œ์ž‘ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋Š” Hugging Face์™€ community ์ž๋ฃŒ ๋ชฉ๋ก(๐ŸŒŽ๋กœ ํ‘œ์‹œ๋จ) ์ž…๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์— ํฌํ•จ๋  ์ž๋ฃŒ๋ฅผ ์ œ์ถœํ•˜๊ณ  ์‹ถ์œผ์‹œ๋‹ค๋ฉด PR(Pull Request)๋ฅผ ์—ด์–ด์ฃผ์„ธ์š”. ๋ฆฌ๋ทฐ ํ•ด๋“œ๋ฆฌ๊ฒ ์Šต๋‹ˆ๋‹ค! ์ž๋ฃŒ๋Š” ๊ธฐ์กด ์ž๋ฃŒ๋ฅผ ๋ณต์ œํ•˜๋Š” ๋Œ€์‹  ์ƒˆ๋กœ์šด ๋‚ด์šฉ์„ ๋‹ด๊ณ  ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. <PipelineTag pipeline="text-generation"/> FP16 ๋ชจ๋ธ ๋กœ๋”ฉ ```python # pip install transformers from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "CohereForAI/c4ai-command-r-v01" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # command-r ์ฑ— ํ…œํ”Œ๋ฆฟ์œผ๋กœ ๋ฉ”์„ธ์ง€ ํ˜•์‹์„ ์ •ํ•˜์„ธ์š” messages = [{"role": "user", "content": "Hello, how are you?"}] input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` bitsandbytes ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์ด์šฉํ•ด์„œ 4bit ์–‘์žํ™”๋œ ๋ชจ๋ธ ๋กœ๋”ฉ ```python # pip install transformers bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig bnb_config = BitsAndBytesConfig(load_in_4bit=True) model_id = "CohereForAI/c4ai-command-r-v01" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config) 
gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` ## CohereConfig[[transformers.CohereConfig]] [[autodoc]] CohereConfig ## CohereTokenizerFast[[transformers.CohereTokenizerFast]] [[autodoc]] CohereTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## CohereModel[[transformers.CohereModel]] [[autodoc]] CohereModel - forward ## CohereForCausalLM[[transformers.CohereForCausalLM]] [[autodoc]] CohereForCausalLM - forward
transformers/docs/source/ko/model_doc/cohere.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/cohere.md", "repo_id": "transformers", "token_count": 4097 }
433
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Informer[[informer]] ## ๊ฐœ์š”[[overview]] The Informer ๋ชจ๋ธ์€ Haoyi Zhou, Shanghang Zhang, Jieqi Peng, Shuai Zhang, Jianxin Li, Hui Xiong, Wancai Zhang๊ฐ€ ์ œ์•ˆํ•œ [Informer: ์žฅ๊ธฐ ์‹œํ€€์Šค ์‹œ๊ณ„์—ด ์˜ˆ์ธก(LSTF)์„ ์œ„ํ•œ ๋”์šฑ ํšจ์œจ์ ์ธ ํŠธ๋žœ์Šคํฌ๋จธ(Beyond Efficient Transformer)](https://huggingface.co/papers/2012.07436)๋ผ๋Š” ๋…ผ๋ฌธ์—์„œ ์†Œ๊ฐœ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์ด ๋ฐฉ๋ฒ•์€ ํ™•๋ฅ ์  ์–ดํ…์…˜ ๋ฉ”์ปค๋‹ˆ์ฆ˜์„ ๋„์ž…ํ•˜์—ฌ "๊ฒŒ์œผ๋ฅธ" ์ฟผ๋ฆฌ๊ฐ€ ์•„๋‹Œ "ํ™œ์„ฑ" ์ฟผ๋ฆฌ๋ฅผ ์„ ํƒํ•˜๊ณ , ํฌ์†Œ ํŠธ๋žœ์Šคํฌ๋จธ๋ฅผ ์ œ๊ณตํ•˜์—ฌ ๊ธฐ์กด ์–ดํ…์…˜์˜ ์ด์ฐจ์  ๊ณ„์‚ฐ ๋ฐ ๋ฉ”๋ชจ๋ฆฌ ์š”๊ตฌ์‚ฌํ•ญ์„ ์™„ํ™”ํ•ฉ๋‹ˆ๋‹ค. ํ•ด๋‹น ๋…ผ๋ฌธ์˜ ์ดˆ๋ก์ž…๋‹ˆ๋‹ค: *์‹ค์ œ๋กœ ๋งŽ์€ ์‘์šฉํ”„๋กœ๊ทธ๋žจ์—์„œ๋Š” ์žฅ๊ธฐ ์‹œํ€€์Šค ์‹œ๊ณ„์—ด ์˜ˆ์ธก(LSTF)์„ ํ•„์š”๋กœ ํ•ฉ๋‹ˆ๋‹ค. LSTF๋Š” ์ถœ๋ ฅ - ์ž…๋ ฅ ๊ฐ„ ์ •ํ™•ํ•œ ์žฅ๊ธฐ ์˜์กด์„ฑ ๊ฒฐํ•ฉ๋„๋ฅผ ํฌ์ฐฉํ•ด๋‚ด๋Š” ๋†’์€ ์˜ˆ์ธก ๋Šฅ๋ ฅ์„ ๋ชจ๋ธ์— ์š”๊ตฌํ•ฉ๋‹ˆ๋‹ค. ์ตœ๊ทผ ์—ฐ๊ตฌ๋“ค์€ ์˜ˆ์ธก ๋Šฅ๋ ฅ์„ ํ–ฅ์ƒ์‹œํ‚ฌ ์ˆ˜ ์žˆ๋Š” ํŠธ๋žœ์Šคํฌ๋จธ์˜ ์ž ์žฌ๋ ฅ์„ ๋ณด์—ฌ์ฃผ๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜, ํŠธ๋žœ์Šคํฌ๋จธ๋ฅผ LSTF์— ์ง์ ‘ ์ ์šฉํ•˜์ง€ ๋ชปํ•˜๋„๋ก ๋ง‰๋Š” ๋ช‡ ์‹ฌ๊ฐํ•œ ๋ฌธ์ œ์ ๋“ค์ด ์žˆ์Šต๋‹ˆ๋‹ค. 
์˜ˆ๋กœ, ์ด์ฐจ ์‹œ๊ฐ„ ๋ณต์žก๋„, ๋†’์€ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰, ์ธ์ฝ”๋”-๋””์ฝ”๋” ์•„ํ‚คํ…์ฒ˜์˜ ๋ณธ์งˆ์  ํ•œ๊ณ„๋ฅผ ๋“ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์ด๋Ÿฌํ•œ ๋ฌธ์ œ๋ฅผ ํ•ด๊ฒฐํ•˜๊ธฐ ์œ„ํ•ด LSTF๋ฅผ ์œ„ํ•œ ํšจ์œจ์ ์ธ ํŠธ๋žœ์Šคํฌ๋จธ ๊ธฐ๋ฐ˜ ๋ชจ๋ธ์ธ Informer๋ฅผ ์„ค๊ณ„ํ–ˆ์Šต๋‹ˆ๋‹ค. Informer์˜ ์„ธ๊ฐ€์ง€ ๋…ํŠนํ•œ ํŠน์„ฑ: (i) ProbSparse ์…€ํ”„ ์–ดํ…์…˜ ๋ฉ”์ปค๋‹ˆ์ฆ˜์œผ๋กœ, ์‹œ๊ฐ„ ๋ณต์žก๋„์™€ ๋ฉ”๋ชจ๋ฆฌ ์‚ฌ์šฉ๋Ÿ‰์—์„œ O(L logL)๋ฅผ ๋‹ฌ์„ฑํ•˜๋ฉฐ ์‹œํ€€์Šค ์˜์กด์„ฑ ์ •๋ ฌ์—์„œ ๋น„๊ต ๊ฐ€๋Šฅํ•œ ์„ฑ๋Šฅ์„ ๋ณด์ž…๋‹ˆ๋‹ค. (ii) ์…€ํ”„ ์–ดํ…์…˜ ์ฆ๋ฅ˜๋Š” ๊ณ„๋‹จ์‹ ๋ ˆ์ด์–ด ์ž…๋ ฅ์„ ๋ฐ˜์œผ๋กœ ์ค„์—ฌ ์ง€๋ฐฐ์ ์ธ ์–ดํ…์…˜์„ ๊ฐ•์กฐํ•˜๊ณ  ๊ทน๋‹จ์ ์œผ๋กœ ๊ธด ์ž…๋ ฅ ์‹œํ€€์Šค๋ฅผ ํšจ์œจ์ ์œผ๋กœ ์ฒ˜๋ฆฌํ•ฉ๋‹ˆ๋‹ค. (iii) ์ƒ์„ฑ ์Šคํƒ€์ผ ๋””์ฝ”๋”๋Š” ๊ฐœ๋…์ ์œผ๋กœ ๋‹จ์ˆœํ•˜์ง€๋งŒ ์žฅ๊ธฐ ์‹œ๊ณ„์—ด ์‹œํ€€์Šค๋ฅผ ๋‹จ๊ณ„๋ณ„ ๋ฐฉ์‹์ด ์•„๋‹Œ ํ•œ ๋ฒˆ์˜ ์ „๋ฐฉ ์—ฐ์‚ฐ์œผ๋กœ ์˜ˆ์ธกํ•˜์—ฌ ์žฅ๊ธฐ ์‹œํ€€์Šค ์˜ˆ์ธก์˜ ์ถ”๋ก  ์†๋„๋ฅผ ํฌ๊ฒŒ ํ–ฅ์ƒ์‹œํ‚ต๋‹ˆ๋‹ค. 4๊ฐœ์˜ ๋Œ€๊ทœ๋ชจ ๋ฐ์ดํ„ฐ์…‹์— ๊ฑธ์นœ ๊ด‘๋ฒ”์œ„ํ•œ ์‹คํ—˜์€ Informer๊ฐ€ ๊ธฐ์กด ๋ฐฉ๋ฒ•๋“ค์„ ํฌ๊ฒŒ ๋Šฅ๊ฐ€ํ•˜๋ฉฐ LSTF ๋ฌธ์ œ์— ์ƒˆ๋กœ์šด ํ•ด๊ฒฐ์ฑ…์„ ์ œ๊ณตํ•จ์„ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค.* ์ด ๋ชจ๋ธ์€ [elisim](https://huggingface.co/elisim)์™€ [kashif](https://huggingface.co/kashif)๊ฐ€ ๊ธฐ์—ฌํ–ˆ์Šต๋‹ˆ๋‹ค. ์›๋ณธ ์ฝ”๋“œ๋Š” [์ด๊ณณ](https://github.com/zhouhaoyi/Informer2020)์—์„œ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ## ์ž๋ฃŒ[[resources]] ์‹œ์ž‘ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋Š” Hugging Face์™€ community ์ž๋ฃŒ ๋ชฉ๋ก(๐ŸŒŽ๋กœ ํ‘œ์‹œ๋จ) ์ž…๋‹ˆ๋‹ค. ์—ฌ๊ธฐ์— ํฌํ•จ๋  ์ž๋ฃŒ๋ฅผ ์ œ์ถœํ•˜๊ณ  ์‹ถ์œผ์‹œ๋‹ค๋ฉด PR(Pull Request)๋ฅผ ์—ด์–ด์ฃผ์„ธ์š”. ๋ฆฌ๋ทฐ ํ•ด๋“œ๋ฆฌ๊ฒ ์Šต๋‹ˆ๋‹ค! ์ž๋ฃŒ๋Š” ๊ธฐ์กด ์ž๋ฃŒ๋ฅผ ๋ณต์ œํ•˜๋Š” ๋Œ€์‹  ์ƒˆ๋กœ์šด ๋‚ด์šฉ์„ ๋‹ด๊ณ  ์žˆ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 
- HuggingFace ๋ธ”๋กœ๊ทธ์—์„œ Informer ํฌ์ŠคํŠธ๋ฅผ ํ™•์ธํ•˜์„ธ์š”: [Informer๋ฅผ ํ™œ์šฉํ•œ ๋‹ค๋ณ€๋Ÿ‰ ํ™•๋ฅ ์  ์‹œ๊ณ„์—ด ์˜ˆ์ธก](https://huggingface.co/blog/informer) ## InformerConfig[[transformers.InformerConfig]] [[autodoc]] InformerConfig ## InformerModel[[transformers.InformerModel]] [[autodoc]] InformerModel - forward ## InformerForPrediction[[transformers.InformerForPrediction]] [[autodoc]] InformerForPrediction - forward
transformers/docs/source/ko/model_doc/informer.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/informer.md", "repo_id": "transformers", "token_count": 2642 }
434
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SigLIP[[siglip]] <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> ## ๊ฐœ์š”[[overview]] SigLIP ๋ชจ๋ธ์€ Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, Lucas Beyer์˜ [Sigmoid Loss for Language Image Pre-Training](https://huggingface.co/papers/2303.15343) ๋…ผ๋ฌธ์—์„œ ์ œ์•ˆ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. SigLIP์€ [CLIP](clip)์—์„œ ์‚ฌ์šฉ๋œ ์†์‹ค ํ•จ์ˆ˜๋ฅผ ๊ฐ„๋‹จํ•œ ์Œ๋ณ„ ์‹œ๊ทธ๋ชจ์ด๋“œ ์†์‹ค(pairwise sigmoid loss)๋กœ ๋Œ€์ฒดํ•  ๊ฒƒ์„ ์ œ์•ˆํ•ฉ๋‹ˆ๋‹ค. ์ด๋Š” ImageNet์—์„œ ์ œ๋กœ์ƒท ๋ถ„๋ฅ˜ ์ •ํ™•๋„ ์ธก๋ฉด์—์„œ ๋” ๋‚˜์€ ์„ฑ๋Šฅ์„ ๋ณด์ž…๋‹ˆ๋‹ค. ๋…ผ๋ฌธ์˜ ์ดˆ๋ก์€ ๋‹ค์Œ๊ณผ ๊ฐ™์Šต๋‹ˆ๋‹ค: *์šฐ๋ฆฌ๋Š” ์–ธ์–ด-์ด๋ฏธ์ง€ ์‚ฌ์ „ ํ•™์Šต(Language-Image Pre-training, SigLIP)์„ ์œ„ํ•œ ๊ฐ„๋‹จํ•œ ์Œ๋ณ„ ์‹œ๊ทธ๋ชจ์ด๋“œ ์†์‹ค์„ ์ œ์•ˆํ•ฉ๋‹ˆ๋‹ค. 
์†Œํ”„ํŠธ๋งฅ์Šค ์ •๊ทœํ™”๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ํ‘œ์ค€ ๋Œ€์กฐ ํ•™์Šต๊ณผ ๋‹ฌ๋ฆฌ, ์‹œ๊ทธ๋ชจ์ด๋“œ ์†์‹ค์€ ์ด๋ฏธ์ง€-ํ…์ŠคํŠธ ์Œ์—๋งŒ ์ž‘์šฉํ•˜๋ฉฐ ์ •๊ทœํ™”๋ฅผ ์œ„ํ•ด ์Œ๋ณ„ ์œ ์‚ฌ์„ฑ์˜ ์ „์—ญ์  ๊ด€์ ์„ ํ•„์š”๋กœ ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์‹œ๊ทธ๋ชจ์ด๋“œ ์†์‹ค์€ ๋ฐฐ์น˜ ํฌ๊ธฐ๋ฅผ ๋”์šฑ ํ™•์žฅํ•  ์ˆ˜ ์žˆ๊ฒŒ ํ•˜๋Š” ๋™์‹œ์— ์ž‘์€ ๋ฐฐ์น˜ ํฌ๊ธฐ์—์„œ๋„ ๋” ๋‚˜์€ ์„ฑ๋Šฅ์„ ๋ณด์ž…๋‹ˆ๋‹ค. Locked-image Tuning๊ณผ ๊ฒฐํ•ฉํ•˜์—ฌ, ๋‹จ 4๊ฐœ์˜ TPUv4 ์นฉ๋งŒ์œผ๋กœ ์ดํ‹€ ๋งŒ์— 84.5%์˜ ImageNet ์ œ๋กœ์ƒท ์ •ํ™•๋„๋ฅผ ๋‹ฌ์„ฑํ•˜๋Š” SigLiT ๋ชจ๋ธ์„ ํ•™์Šตํ–ˆ์Šต๋‹ˆ๋‹ค. ์†์‹ค ํ•จ์ˆ˜์—์„œ ๋ฐฐ์น˜ ํฌ๊ธฐ๋ฅผ ๋ถ„๋ฆฌํ•จ์œผ๋กœ์จ ์˜ˆ์ œ ๋Œ€ ์Œ์˜ ์˜ํ–ฅ๊ณผ Negative ๋Œ€ Positive ๋น„์œจ์„ ์—ฐ๊ตฌํ•  ์ˆ˜ ์žˆ๊ฒŒ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ๋งˆ์ง€๋ง‰์œผ๋กœ, ์šฐ๋ฆฌ๋Š” ๋ฐฐ์น˜ ํฌ๊ธฐ๋ฅผ 100๋งŒ ๊ฐœ๊นŒ์ง€ ๊ทน๋‹จ์ ์œผ๋กœ ๋Š˜๋ ค๋ณด์•˜๊ณ , ๋ฐฐ์น˜ ํฌ๊ธฐ ์ฆ๊ฐ€์˜ ์ด์ ์ด ๋น ๋ฅด๊ฒŒ ๊ฐ์†Œํ•˜๋ฉฐ 32k์˜ ๋” ํ•ฉ๋ฆฌ์ ์ธ ๋ฐฐ์น˜ ํฌ๊ธฐ๋กœ๋„ ์ถฉ๋ถ„ํ•˜๋‹ค๋Š” ๊ฒƒ์„ ๋ฐœ๊ฒฌํ–ˆ์Šต๋‹ˆ๋‹ค.* ## ์‚ฌ์šฉ ํŒ[[usage-tips]] - SigLIP์˜ ์‚ฌ์šฉ๋ฒ•์€ [CLIP](clip)๊ณผ ์œ ์‚ฌํ•ฉ๋‹ˆ๋‹ค. ์ฃผ์š” ์ฐจ์ด์ ์€ ํ•™์Šต ์†์‹ค ํ•จ์ˆ˜๋กœ, ๋ฐฐ์น˜ ๋‚ด ๋ชจ๋“  ์ด๋ฏธ์ง€์™€ ํ…์ŠคํŠธ ๊ฐ„์˜ ์Œ๋ณ„ ์œ ์‚ฌ์„ฑ์— ๋Œ€ํ•œ ์ „์—ญ์  ๊ด€์ ์ด ํ•„์š”ํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. ์†Œํ”„ํŠธ๋งฅ์Šค ๋Œ€์‹  ๋กœ์ง“์— ์‹œ๊ทธ๋ชจ์ด๋“œ ํ™œ์„ฑํ™” ํ•จ์ˆ˜๋ฅผ ์ ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. - ํ•™์Šต์€ ์ง€์›๋˜์ง€๋งŒ `torch.distributed` ์œ ํ‹ธ๋ฆฌํ‹ฐ๋ฅผ ์‚ฌ์šฉํ•˜์ง€ ์•Š์•„ ๋ฐฐ์น˜ ํฌ๊ธฐ์˜ ํ™•์žฅ์„ฑ์ด ์ œํ•œ๋  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ ๋‹จ์ผ ๋…ธ๋“œ ๋‹ค์ค‘ GPU ์„ค์ •์—์„œ๋Š” DDP์™€ FDSP๊ฐ€ ์ž‘๋™ํ•ฉ๋‹ˆ๋‹ค. - ๋…๋ฆฝํ˜• [`SiglipTokenizer`] ๋˜๋Š” [`SiglipProcessor`]๋ฅผ ์‚ฌ์šฉํ•  ๋•Œ๋Š” ๋ชจ๋ธ์ด ๊ทธ๋ ‡๊ฒŒ ํ•™์Šต๋˜์—ˆ์œผ๋ฏ€๋กœ `padding="max_length"`๋ฅผ ์ „๋‹ฌํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. - ํŒŒ์ดํ”„๋ผ์ธ๊ณผ ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์œผ๋ ค๋ฉด "This is a photo of {label}."์˜ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ์„ ์‚ฌ์šฉํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/siglip_table.jpeg" alt="drawing" width="600"/> <small> CLIP๊ณผ ๋น„๊ตํ•œ SigLIP ํ‰๊ฐ€ ๊ฒฐ๊ณผ. <a href="https://huggingface.co/papers/2303.15343">์›๋ณธ ๋…ผ๋ฌธ</a>์—์„œ ๋ฐœ์ทŒ.</small> ์ด ๋ชจ๋ธ์€ [nielsr](https://huggingface.co/nielsr)๊ฐ€ ๊ธฐ์—ฌํ–ˆ์Šต๋‹ˆ๋‹ค. ์›๋ณธ ์ฝ”๋“œ๋Š” [์—ฌ๊ธฐ](https://github.com/google-research/big_vision/tree/main)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ## ์‚ฌ์šฉ ์˜ˆ์‹œ[[usage-example]] SigLIP์„ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•์—๋Š” ๋‘ ๊ฐ€์ง€ ์ฃผ์š” ๋ฐฉ๋ฒ•์ด ์žˆ์Šต๋‹ˆ๋‹ค: ๋ชจ๋“  ๋ณต์žก์„ฑ์„ ์ถ”์ƒํ™”ํ•˜๋Š” ํŒŒ์ดํ”„๋ผ์ธ API๋ฅผ ์‚ฌ์šฉํ•˜๊ฑฐ๋‚˜, ์ง์ ‘ `SiglipModel` ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. ### ํŒŒ์ดํ”„๋ผ์ธ API[[pipeline-API]] ํŒŒ์ดํ”„๋ผ์ธ์„ ์‚ฌ์šฉํ•˜๋ฉด ๋ช‡ ์ค„์˜ ์ฝ”๋“œ๋กœ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```python >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> # ํŒŒ์ดํ”„๋ผ์ธ ๋กœ๋“œ >>> image_classifier = pipeline(task="zero-shot-image-classification", model="google/siglip-base-patch16-224") >>> # ์ด๋ฏธ์ง€ ๋กœ๋“œ >>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg' >>> image = Image.open(requests.get(url, stream=True).raw) >>> # ์ถ”๋ก  >>> candidate_labels = ["2 cats", "a plane", "a remote"] >>> outputs = image_classifier(image, candidate_labels=candidate_labels) >>> outputs = [{"score": round(output["score"], 4), "label": output["label"] } for output in outputs] >>> print(outputs) [{'score': 0.1979, 'label': '2 cats'}, {'score': 0.0, 'label': 'a remote'}, {'score': 0.0, 'label': 'a plane'}] ``` ### ์ง์ ‘ ๋ชจ๋ธ ์‚ฌ์šฉํ•˜๊ธฐ[[using-the-model-yourself]] ์ „์ฒ˜๋ฆฌ์™€ ํ›„์ฒ˜๋ฆฌ๋ฅผ ์ง์ ‘ ์ˆ˜ํ–‰ํ•˜๋ ค๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™์ด ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, AutoModel >>> import torch >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224") >>> processor = 
AutoProcessor.from_pretrained("google/siglip-base-patch16-224") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> candidate_labels = ["2 cats", "2 dogs"] # ํŒŒ์ดํ”„๋ผ์ธ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ์„ ๋”ฐ๋ผ ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์Šต๋‹ˆ๋‹ค >>> texts = [f'This is a photo of {label}.' for label in candidate_labels] # ์ค‘์š”: ๋ชจ๋ธ์ด ์ด๋ ‡๊ฒŒ ํ•™์Šต๋˜์—ˆ์œผ๋ฏ€๋กœ `padding=max_length`๋ฅผ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image >>> probs = torch.sigmoid(logits_per_image) # ์‹œ๊ทธ๋ชจ์ด๋“œ ํ™œ์„ฑํ™” ํ•จ์ˆ˜๋ฅผ ์ ์šฉํ•œ ํ™•๋ฅ ์ž…๋‹ˆ๋‹ค >>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'") 19.8% that image 0 is '2 cats' ``` ## ๋ฆฌ์†Œ์Šค[[resources]] SigLIP์„ ์‹œ์ž‘ํ•˜๋Š” ๋ฐ ๋„์›€์ด ๋˜๋Š” ๊ณต์‹ Hugging Face ๋ฐ ์ปค๋ฎค๋‹ˆํ‹ฐ(๐ŸŒŽ๋กœ ํ‘œ์‹œ) ๋ฆฌ์†Œ์Šค ๋ชฉ๋ก์ž…๋‹ˆ๋‹ค. - [์ œ๋กœ์ƒท ์ด๋ฏธ์ง€ ๋ถ„๋ฅ˜ ์ž‘์—… ๊ฐ€์ด๋“œ](../tasks/zero_shot_image_classification) - SigLIP์— ๋Œ€ํ•œ ๋ฐ๋ชจ ๋…ธํŠธ๋ถ์€ [์—ฌ๊ธฐ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/SigLIP)์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๐ŸŒŽ ์—ฌ๊ธฐ์— ํฌํ•จ๋  ๋ฆฌ์†Œ์Šค๋ฅผ ์ œ์ถœํ•˜๋Š” ๋ฐ ๊ด€์‹ฌ์ด ์žˆ์œผ์‹œ๋ฉด Pull Request๋ฅผ ์—ด์–ด์ฃผ์‹œ๋ฉด ๊ฒ€ํ† ํ•˜๊ฒ ์Šต๋‹ˆ๋‹ค! ๋ฆฌ์†Œ์Šค๋Š” ์ด์ƒ์ ์œผ๋กœ ๊ธฐ์กด ๋ฆฌ์†Œ์Šค๋ฅผ ๋ณต์ œํ•˜๋Š” ๋Œ€์‹  ์ƒˆ๋กœ์šด ๊ฒƒ์„ ๋ณด์—ฌ์ฃผ์–ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ## SigLIP๊ณผ Flash Attention 2 ๊ฒฐํ•ฉํ•˜๊ธฐ[[combining-siglip-with-flash-attention-2]] ๋จผ์ € Flash Attention 2์˜ ์ตœ์‹  ๋ฒ„์ „์„ ์„ค์น˜ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ```bash pip install -U flash-attn --no-build-isolation ``` ๋˜ํ•œ Flash-Attention 2์™€ ํ˜ธํ™˜๋˜๋Š” ํ•˜๋“œ์›จ์–ด๊ฐ€ ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”. flash-attn ์ €์žฅ์†Œ์˜ ๊ณต์‹ ๋ฌธ์„œ์—์„œ ์ž์„ธํžˆ ์•Œ์•„๋ณด์„ธ์š”. ๋˜ํ•œ ๋ชจ๋ธ์„ ๋ฐ˜์ •๋ฐ€๋„(์˜ˆ: `torch.float16`)๋กœ ๋กœ๋“œํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. 
Flash Attention 2๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ๋กœ๋“œํ•˜๊ณ  ์‹คํ–‰ํ•˜๋ ค๋ฉด ์•„๋ž˜ ์ฝ”๋“œ๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”: ```python >>> import torch >>> import requests >>> from PIL import Image >>> from transformers import SiglipProcessor, SiglipModel >>> device = "cuda" # ๋ชจ๋ธ์„ ๋กœ๋“œํ•  ์žฅ์น˜ >>> model = SiglipModel.from_pretrained( ... "google/siglip-so400m-patch14-384", ... attn_implementation="flash_attention_2", ... dtype=torch.float16, ... device_map=device, ... ) >>> processor = SiglipProcessor.from_pretrained("google/siglip-so400m-patch14-384") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> candidate_labels = ["2 cats", "2 dogs"] # ํŒŒ์ดํ”„๋ผ์ธ ํ”„๋กฌํ”„ํŠธ ํ…œํ”Œ๋ฆฟ์„ ๋”ฐ๋ผ ๋™์ผํ•œ ๊ฒฐ๊ณผ๋ฅผ ์–ป์Šต๋‹ˆ๋‹ค >>> texts = [f'This is a photo of {label}.' for label in candidate_labels] # ์ค‘์š”: ๋ชจ๋ธ์ด ์ด๋ ‡๊ฒŒ ํ•™์Šต๋˜์—ˆ์œผ๋ฏ€๋กœ `padding=max_length`๋ฅผ ์ „๋‹ฌํ•ฉ๋‹ˆ๋‹ค >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt").to(device) >>> with torch.no_grad(): ... with torch.autocast(device): ... outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image >>> probs = torch.sigmoid(logits_per_image) # ์‹œ๊ทธ๋ชจ์ด๋“œ ํ™œ์„ฑํ™” ํ•จ์ˆ˜๋ฅผ ์ ์šฉํ•œ ํ™•๋ฅ ์ž…๋‹ˆ๋‹ค >>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'") 19.8% that image 0 is '2 cats' ``` ## Scaled Dot Product Attention(SDPA) ์‚ฌ์šฉํ•˜๊ธฐ[[using-scaled-dot-product-attention(SDPA)]] PyTorch๋Š” `torch.nn.functional`์˜ ์ผ๋ถ€๋กœ ์Šค์ผ€์ผ๋œ ์ ๊ณฑ ์–ดํ…์…˜(SDPA) ์—ฐ์‚ฐ์ž๋ฅผ ํฌํ•จํ•ฉ๋‹ˆ๋‹ค. ์ด ํ•จ์ˆ˜๋Š” ์ž…๋ ฅ๊ณผ ์‚ฌ์šฉ ์ค‘์ธ ํ•˜๋“œ์›จ์–ด์— ๋”ฐ๋ผ ์ ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ์—ฌ๋Ÿฌ ๊ตฌํ˜„์„ ํฌํ•จํ•ฉ๋‹ˆ๋‹ค. 
์ž์„ธํ•œ ๋‚ด์šฉ์€ [๊ณต์‹ ๋ฌธ์„œ](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) ๋˜๋Š” [GPU ์ถ”๋ก ](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) ํŽ˜์ด์ง€๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. `from_pretrained()`์—์„œ `attn_implementation="sdpa"`๋ฅผ ์„ค์ •ํ•˜์—ฌ SDPA๋ฅผ ๋ช…์‹œ์ ์œผ๋กœ ์š”์ฒญํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. `torch>=2.1.1`์ด ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”. ```python >>> from transformers import SiglipModel >>> model = SiglipModel.from_pretrained( ... "google/siglip-so400m-patch14-384", ... attn_implementation="sdpa", ... dtype=torch.float16, ... device_map=device, ... ) ``` ์ตœ์ƒ์˜ ์†๋„ ํ–ฅ์ƒ์„ ์œ„ํ•ด ๋ชจ๋ธ์„ ๋ฐ˜์ •๋ฐ€๋„(์˜ˆ: `torch.float16` ๋˜๋Š” `torch.bfloat16`)๋กœ ๋กœ๋“œํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค. ## ์˜ˆ์ƒ ์†๋„ ํ–ฅ์ƒ[[expected-speedups]] ์•„๋ž˜๋Š” `google/siglip-so400m-patch14-384` ์ฒดํฌํฌ์ธํŠธ๋ฅผ `float16` ์ •๋ฐ€๋„๋กœ ์‚ฌ์šฉํ•˜๋Š” transformers์˜ ๋„ค์ดํ‹ฐ๋ธŒ ๊ตฌํ˜„๊ณผ Flash Attention 2 / SDPA ๋ฒ„์ „์˜ ๋ชจ๋ธ์„ ๋‹ค์–‘ํ•œ ๋ฐฐ์น˜ ํฌ๊ธฐ๋กœ ๋น„๊ตํ•œ ์ถ”๋ก  ์‹œ๊ฐ„์˜ ์˜ˆ์ƒ ์†๋„ ํ–ฅ์ƒ ๋‹ค์ด์–ด๊ทธ๋žจ์ž…๋‹ˆ๋‹ค. 
<div style="text-align: center"> <img src="https://i.imgur.com/cWm4rsn.png"> </div> ## SiglipConfig [[autodoc]] SiglipConfig - from_text_vision_configs ## SiglipTextConfig [[autodoc]] SiglipTextConfig ## SiglipVisionConfig [[autodoc]] SiglipVisionConfig ## SiglipTokenizer [[autodoc]] SiglipTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## SiglipImageProcessor [[autodoc]] SiglipImageProcessor - preprocess ## SiglipImageProcessorFast [[autodoc]] SiglipImageProcessorFast - preprocess ## SiglipProcessor [[autodoc]] SiglipProcessor ## SiglipModel [[autodoc]] SiglipModel - forward - get_text_features - get_image_features ## SiglipTextModel [[autodoc]] SiglipTextModel - forward ## SiglipVisionModel [[autodoc]] SiglipVisionModel - forward ## SiglipForImageClassification [[autodoc]] SiglipForImageClassification - forward
transformers/docs/source/ko/model_doc/siglip.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/siglip.md", "repo_id": "transformers", "token_count": 6533 }
435
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ๐Ÿค— PEFT๋กœ ์–ด๋Œ‘ํ„ฐ ๊ฐ€์ ธ์˜ค๊ธฐ [[load-adapters-with-peft]] [[open-in-colab]] [Parameter-Efficient Fine Tuning (PEFT)](https://huggingface.co/blog/peft) ๋ฐฉ๋ฒ•์€ ์‚ฌ์ „ํ›ˆ๋ จ๋œ ๋ชจ๋ธ์˜ ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ๋ฏธ์„ธ ์กฐ์ • ์ค‘ ๊ณ ์ •์‹œํ‚ค๊ณ , ๊ทธ ์œ„์— ํ›ˆ๋ จํ•  ์ˆ˜ ์žˆ๋Š” ๋งค์šฐ ์ ์€ ์ˆ˜์˜ ๋งค๊ฐœ๋ณ€์ˆ˜(์–ด๋Œ‘ํ„ฐ)๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค. ์–ด๋Œ‘ํ„ฐ๋Š” ์ž‘์—…๋ณ„ ์ •๋ณด๋ฅผ ํ•™์Šตํ•˜๋„๋ก ํ›ˆ๋ จ๋ฉ๋‹ˆ๋‹ค. ์ด ์ ‘๊ทผ ๋ฐฉ์‹์€ ์™„์ „ํžˆ ๋ฏธ์„ธ ์กฐ์ •๋œ ๋ชจ๋ธ์— ํ•„์ ํ•˜๋Š” ๊ฒฐ๊ณผ๋ฅผ ์ƒ์„ฑํ•˜๋ฉด์„œ, ๋ฉ”๋ชจ๋ฆฌ ํšจ์œจ์ ์ด๊ณ  ๋น„๊ต์  ์ ์€ ์ปดํ“จํŒ… ๋ฆฌ์†Œ์Šค๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค. ๋˜ํ•œ PEFT๋กœ ํ›ˆ๋ จ๋œ ์–ด๋Œ‘ํ„ฐ๋Š” ์ผ๋ฐ˜์ ์œผ๋กœ ์ „์ฒด ๋ชจ๋ธ๋ณด๋‹ค ํ›จ์”ฌ ์ž‘๊ธฐ ๋•Œ๋ฌธ์— ๊ณต์œ , ์ €์žฅ ๋ฐ ๊ฐ€์ ธ์˜ค๊ธฐ๊ฐ€ ํŽธ๋ฆฌํ•ฉ๋‹ˆ๋‹ค. <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">Hub์— ์ €์žฅ๋œ OPTForCausalLM ๋ชจ๋ธ์˜ ์–ด๋Œ‘ํ„ฐ ๊ฐ€์ค‘์น˜๋Š” ์ตœ๋Œ€ 700MB์— ๋‹ฌํ•˜๋Š” ๋ชจ๋ธ ๊ฐ€์ค‘์น˜์˜ ์ „์ฒด ํฌ๊ธฐ์— ๋น„ํ•ด ์•ฝ 6MB์— ๋ถˆ๊ณผํ•ฉ๋‹ˆ๋‹ค.</figcaption> </div> ๐Ÿค— PEFT ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์— ๋Œ€ํ•ด ์ž์„ธํžˆ ์•Œ์•„๋ณด๋ ค๋ฉด [๋ฌธ์„œ](https://huggingface.co/docs/peft/index)๋ฅผ ํ™•์ธํ•˜์„ธ์š”. 
## ์„ค์ • [[setup]] ๐Ÿค— PEFT๋ฅผ ์„ค์น˜ํ•˜์—ฌ ์‹œ์ž‘ํ•˜์„ธ์š”: ```bash pip install peft ``` ์ƒˆ๋กœ์šด ๊ธฐ๋Šฅ์„ ์‚ฌ์šฉํ•ด๋ณด๊ณ  ์‹ถ๋‹ค๋ฉด, ๋‹ค์Œ ์†Œ์Šค์—์„œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์„ค์น˜ํ•˜๋Š” ๊ฒƒ์ด ์ข‹์Šต๋‹ˆ๋‹ค: ```bash pip install git+https://github.com/huggingface/peft.git ``` ## ์ง€์›๋˜๋Š” PEFT ๋ชจ๋ธ [[supported-peft-models]] ๐Ÿค— Transformers๋Š” ๊ธฐ๋ณธ์ ์œผ๋กœ ์ผ๋ถ€ PEFT ๋ฐฉ๋ฒ•์„ ์ง€์›ํ•˜๋ฉฐ, ๋กœ์ปฌ์ด๋‚˜ Hub์— ์ €์žฅ๋œ ์–ด๋Œ‘ํ„ฐ ๊ฐ€์ค‘์น˜๋ฅผ ๊ฐ€์ ธ์˜ค๊ณ  ๋ช‡ ์ค„์˜ ์ฝ”๋“œ๋งŒ์œผ๋กœ ์‰ฝ๊ฒŒ ์‹คํ–‰ํ•˜๊ฑฐ๋‚˜ ํ›ˆ๋ จํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋‹ค์Œ ๋ฐฉ๋ฒ•์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค: - [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) - [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) - [AdaLoRA](https://huggingface.co/papers/2303.10512) ๐Ÿค— PEFT์™€ ๊ด€๋ จ๋œ ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•(์˜ˆ: ํ”„๋กฌํ”„ํŠธ ํ›ˆ๋ จ ๋˜๋Š” ํ”„๋กฌํ”„ํŠธ ํŠœ๋‹) ๋˜๋Š” ์ผ๋ฐ˜์ ์ธ ๐Ÿค— PEFT ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์— ๋Œ€ํ•ด ์ž์„ธํžˆ ์•Œ์•„๋ณด๋ ค๋ฉด [๋ฌธ์„œ](https://huggingface.co/docs/peft/index)๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ## PEFT ์–ด๋Œ‘ํ„ฐ ๊ฐ€์ ธ์˜ค๊ธฐ [[load-a-peft-adapter]] ๐Ÿค— Transformers์—์„œ PEFT ์–ด๋Œ‘ํ„ฐ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ค๊ณ  ์‚ฌ์šฉํ•˜๋ ค๋ฉด Hub ์ €์žฅ์†Œ๋‚˜ ๋กœ์ปฌ ๋””๋ ‰ํ„ฐ๋ฆฌ์— `adapter_config.json` ํŒŒ์ผ๊ณผ ์–ด๋Œ‘ํ„ฐ ๊ฐ€์ค‘์น˜๊ฐ€ ํฌํ•จ๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์‹ญ์‹œ์˜ค. ๊ทธ๋Ÿฐ ๋‹ค์Œ `AutoModelFor` ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ PEFT ์–ด๋Œ‘ํ„ฐ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด ์ธ๊ณผ ๊ด€๊ณ„ ์–ธ์–ด ๋ชจ๋ธ์šฉ PEFT ์–ด๋Œ‘ํ„ฐ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ค๋ ค๋ฉด ๋‹ค์Œ ๋‹จ๊ณ„๋ฅผ ๋”ฐ๋ฅด์‹ญ์‹œ์˜ค: 1. PEFT ๋ชจ๋ธ ID๋ฅผ ์ง€์ •ํ•˜์‹ญ์‹œ์˜ค. 2. [`AutoModelForCausalLM`] ํด๋ž˜์Šค์— ์ „๋‹ฌํ•˜์‹ญ์‹œ์˜ค. ```py from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id) ``` <Tip> `AutoModelFor` ํด๋ž˜์Šค๋‚˜ ๊ธฐ๋ณธ ๋ชจ๋ธ ํด๋ž˜์Šค(์˜ˆ: `OPTForCausalLM` ๋˜๋Š” `LlamaForCausalLM`) ์ค‘ ํ•˜๋‚˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ PEFT ์–ด๋Œ‘ํ„ฐ๋ฅผ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
</Tip> `load_adapter` ๋ฉ”์†Œ๋“œ๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ PEFT ์–ด๋Œ‘ํ„ฐ๋ฅผ ๊ฐ€์ ธ์˜ฌ ์ˆ˜๋„ ์žˆ์Šต๋‹ˆ๋‹ค. ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "facebook/opt-350m" peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ``` ## 8๋น„ํŠธ ๋˜๋Š” 4๋น„ํŠธ๋กœ ๊ฐ€์ ธ์˜ค๊ธฐ [[load-in-8bit-or-4bit]] `bitsandbytes` ํ†ตํ•ฉ์€ 8๋น„ํŠธ์™€ 4๋น„ํŠธ ์ •๋ฐ€๋„ ๋ฐ์ดํ„ฐ ์œ ํ˜•์„ ์ง€์›ํ•˜๋ฏ€๋กœ ํฐ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ฌ ๋•Œ ์œ ์šฉํ•˜๋ฉด์„œ ๋ฉ”๋ชจ๋ฆฌ๋„ ์ ˆ์•ฝํ•ฉ๋‹ˆ๋‹ค. ๋ชจ๋ธ์„ ํ•˜๋“œ์›จ์–ด์— ํšจ๊ณผ์ ์œผ๋กœ ๋ถ„๋ฐฐํ•˜๋ ค๋ฉด [`~PreTrainedModel.from_pretrained`]์— `load_in_8bit` ๋˜๋Š” `load_in_4bit` ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ์ถ”๊ฐ€ํ•˜๊ณ  `device_map="auto"`๋ฅผ ์„ค์ •ํ•˜์„ธ์š”: ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` ## ์ƒˆ ์–ด๋Œ‘ํ„ฐ ์ถ”๊ฐ€ [[add-a-new-adapter]] ์ƒˆ ์–ด๋Œ‘ํ„ฐ๊ฐ€ ํ˜„์žฌ ์–ด๋Œ‘ํ„ฐ์™€ ๋™์ผํ•œ ์œ ํ˜•์ธ ๊ฒฝ์šฐ์— ํ•œํ•ด ๊ธฐ์กด ์–ด๋Œ‘ํ„ฐ๊ฐ€ ์žˆ๋Š” ๋ชจ๋ธ์— ์ƒˆ ์–ด๋Œ‘ํ„ฐ๋ฅผ ์ถ”๊ฐ€ํ•˜๋ ค๋ฉด [`~peft.PeftModel.add_adapter`]๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
์˜ˆ๋ฅผ ๋“ค์–ด ๋ชจ๋ธ์— ๊ธฐ์กด LoRA ์–ด๋Œ‘ํ„ฐ๊ฐ€ ์—ฐ๊ฒฐ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import PeftConfig model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained(model_id) lora_config = LoraConfig( target_modules=["q_proj", "k_proj"], init_lora_weights=False ) model.add_adapter(lora_config, adapter_name="adapter_1") ``` ์ƒˆ ์–ด๋Œ‘ํ„ฐ๋ฅผ ์ถ”๊ฐ€ํ•˜๋ ค๋ฉด: ```py # attach new adapter with same config model.add_adapter(lora_config, adapter_name="adapter_2") ``` ์ด์ œ [`~peft.PeftModel.set_adapter`]๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์–ด๋Œ‘ํ„ฐ๋ฅผ ์‚ฌ์šฉํ•  ์–ด๋Œ‘ํ„ฐ๋กœ ์„ค์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py # use adapter_1 model.set_adapter("adapter_1") output = model.generate(**inputs) print(tokenizer.decode(output_disabled[0], skip_special_tokens=True)) # use adapter_2 model.set_adapter("adapter_2") output_enabled = model.generate(**inputs) print(tokenizer.decode(output_enabled[0], skip_special_tokens=True)) ``` ## ์–ด๋Œ‘ํ„ฐ ํ™œ์„ฑํ™” ๋ฐ ๋น„ํ™œ์„ฑํ™” [[enable-and-disable-adapters]] ๋ชจ๋ธ์— ์–ด๋Œ‘ํ„ฐ๋ฅผ ์ถ”๊ฐ€ํ•œ ํ›„ ์–ด๋Œ‘ํ„ฐ ๋ชจ๋“ˆ์„ ํ™œ์„ฑํ™” ๋˜๋Š” ๋น„ํ™œ์„ฑํ™”ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. 
์–ด๋Œ‘ํ„ฐ ๋ชจ๋“ˆ์„ ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import PeftConfig model_id = "facebook/opt-350m" adapter_model_id = "ybelkada/opt-350m-lora" tokenizer = AutoTokenizer.from_pretrained(model_id) text = "Hello" inputs = tokenizer(text, return_tensors="pt") model = AutoModelForCausalLM.from_pretrained(model_id) peft_config = PeftConfig.from_pretrained(adapter_model_id) # to initiate with random weights peft_config.init_lora_weights = False model.add_adapter(peft_config) model.enable_adapters() output = model.generate(**inputs) ``` ์–ด๋Œ‘ํ„ฐ ๋ชจ๋“ˆ์„ ๋น„ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด: ```py model.disable_adapters() output = model.generate(**inputs) ``` ## PEFT ์–ด๋Œ‘ํ„ฐ ํ›ˆ๋ จ [[train-a-peft-adapter]] PEFT ์–ด๋Œ‘ํ„ฐ๋Š” [`Trainer`] ํด๋ž˜์Šค์—์„œ ์ง€์›๋˜๋ฏ€๋กœ ํŠน์ • ์‚ฌ์šฉ ์‚ฌ๋ก€์— ๋งž๊ฒŒ ์–ด๋Œ‘ํ„ฐ๋ฅผ ํ›ˆ๋ จํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋ช‡ ์ค„์˜ ์ฝ”๋“œ๋ฅผ ์ถ”๊ฐ€ํ•˜๊ธฐ๋งŒ ํ•˜๋ฉด ๋ฉ๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด LoRA ์–ด๋Œ‘ํ„ฐ๋ฅผ ํ›ˆ๋ จํ•˜๋ ค๋ฉด: <Tip> [`Trainer`]๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ๋ฏธ์„ธ ์กฐ์ •ํ•˜๋Š” ๊ฒƒ์ด ์ต์ˆ™ํ•˜์ง€ ์•Š๋‹ค๋ฉด [์‚ฌ์ „ํ›ˆ๋ จ๋œ ๋ชจ๋ธ์„ ๋ฏธ์„ธ ์กฐ์ •ํ•˜๊ธฐ](training) ํŠœํ† ๋ฆฌ์–ผ์„ ํ™•์ธํ•˜์„ธ์š”. </Tip> 1. ์ž‘์—… ์œ ํ˜• ๋ฐ ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์ง€์ •ํ•˜์—ฌ ์–ด๋Œ‘ํ„ฐ ๊ตฌ์„ฑ์„ ์ •์˜ํ•ฉ๋‹ˆ๋‹ค. ํ•˜์ดํผํŒŒ๋ผ๋ฏธํ„ฐ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ๋‚ด์šฉ์€ [`~peft.LoraConfig`]๋ฅผ ์ฐธ์กฐํ•˜์„ธ์š”. ```py from peft import LoraConfig peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", task_type="CAUSAL_LM", ) ``` 2. ๋ชจ๋ธ์— ์–ด๋Œ‘ํ„ฐ๋ฅผ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค. ```py model.add_adapter(peft_config) ``` 3. ์ด์ œ ๋ชจ๋ธ์„ [`Trainer`]์— ์ „๋‹ฌํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค! ```py trainer = Trainer(model=model, ...) trainer.train() ``` ํ›ˆ๋ จํ•œ ์–ด๋Œ‘ํ„ฐ๋ฅผ ์ €์žฅํ•˜๊ณ  ๋‹ค์‹œ ๊ฐ€์ ธ์˜ค๋ ค๋ฉด: ```py model.save_pretrained(save_dir) model = AutoModelForCausalLM.from_pretrained(save_dir) ```
transformers/docs/source/ko/peft.md/0
{ "file_path": "transformers/docs/source/ko/peft.md", "repo_id": "transformers", "token_count": 5060 }
436
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โš ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AWQ [[awq]] <Tip> ์ด [๋…ธํŠธ๋ถ](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY) ์œผ๋กœ AWQ ์–‘์žํ™”๋ฅผ ์‹ค์Šตํ•ด๋ณด์„ธ์š” ! </Tip> [Activation-aware Weight Quantization (AWQ)](https://hf.co/papers/2306.00978)์€ ๋ชจ๋ธ์˜ ๋ชจ๋“  ๊ฐ€์ค‘์น˜๋ฅผ ์–‘์žํ™”ํ•˜์ง€ ์•Š๊ณ , LLM ์„ฑ๋Šฅ์— ์ค‘์š”ํ•œ ๊ฐ€์ค‘์น˜๋ฅผ ์œ ์ง€ํ•ฉ๋‹ˆ๋‹ค. ์ด๋กœ์จ 4๋น„ํŠธ ์ •๋ฐ€๋„๋กœ ๋ชจ๋ธ์„ ์‹คํ–‰ํ•ด๋„ ์„ฑ๋Šฅ ์ €ํ•˜ ์—†์ด ์–‘์žํ™” ์†์‹ค์„ ํฌ๊ฒŒ ์ค„์ผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. AWQ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ์–‘์žํ™”ํ•  ์ˆ˜ ์žˆ๋Š” ์—ฌ๋Ÿฌ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๊ฐ€ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด [llm-awq](https://github.com/mit-han-lab/llm-awq), [autoawq](https://github.com/casper-hansen/AutoAWQ) , [optimum-intel](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc) ๋“ฑ์ด ์žˆ์Šต๋‹ˆ๋‹ค. Transformers๋Š” llm-awq, autoawq ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์ด์šฉํ•ด ์–‘์žํ™”๋œ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์žˆ๋„๋ก ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. ์ด ๊ฐ€์ด๋“œ์—์„œ๋Š” autoawq๋กœ ์–‘์žํ™”๋œ ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ค๋Š” ๋ฐฉ๋ฒ•์„ ๋ณด์—ฌ๋“œ๋ฆฌ๋‚˜, llm-awq๋กœ ์–‘์žํ™”๋œ ๋ชจ๋ธ์˜ ๊ฒฝ์šฐ๋„ ์œ ์‚ฌํ•œ ์ ˆ์ฐจ๋ฅผ ๋”ฐ๋ฆ…๋‹ˆ๋‹ค. 
autoawq๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ๋Š”์ง€ ํ™•์ธํ•˜์„ธ์š”: ```bash pip install autoawq ``` AWQ ์–‘์žํ™”๋œ ๋ชจ๋ธ์€ ํ•ด๋‹น ๋ชจ๋ธ์˜ [config.json](https://huggingface.co/TheBloke/zephyr-7B-alpha-AWQ/blob/main/config.json) ํŒŒ์ผ์˜ `quantization_config` ์†์„ฑ์„ ํ†ตํ•ด ์‹๋ณ„ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.: ```json { "_name_or_path": "/workspace/process/huggingfaceh4_zephyr-7b-alpha/source", "architectures": [ "MistralForCausalLM" ], ... ... ... "quantization_config": { "quant_method": "awq", "zero_point": true, "group_size": 128, "bits": 4, "version": "gemm" } } ``` ์–‘์žํ™”๋œ ๋ชจ๋ธ์€ [`~PreTrainedModel.from_pretrained`] ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๊ฐ€์ ธ์˜ต๋‹ˆ๋‹ค. ๋ชจ๋ธ์„ CPU์— ๊ฐ€์ ธ์™”๋‹ค๋ฉด, ๋จผ์ € ๋ชจ๋ธ์„ GPU ์žฅ์น˜๋กœ ์˜ฎ๊ฒจ์•ผ ํ•ฉ๋‹ˆ๋‹ค. `device_map` ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ๋ฐฐ์น˜ํ•  ์œ„์น˜๋ฅผ ์ง€์ •ํ•˜์„ธ์š”: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "TheBloke/zephyr-7B-alpha-AWQ" model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0") ``` AWQ ์–‘์žํ™” ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ค๋ฉด ์ž๋™์œผ๋กœ ์„ฑ๋Šฅ์ƒ์˜ ์ด์œ ๋กœ ์ธํ•ด ๊ฐ€์ค‘์น˜๋“ค์˜ ๊ธฐ๋ณธ๊ฐ’์ด fp16์œผ๋กœ ์„ค์ •๋ฉ๋‹ˆ๋‹ค. ๊ฐ€์ค‘์น˜๋ฅผ ๋‹ค๋ฅธ ํ˜•์‹์œผ๋กœ ๊ฐ€์ ธ์˜ค๋ ค๋ฉด, `dtype` ํŒŒ๋ผ๋ฏธํ„ฐ๋ฅผ ์‚ฌ์šฉํ•˜์„ธ์š”: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "TheBloke/zephyr-7B-alpha-AWQ" model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float32) ``` ์ถ”๋ก ์„ ๋”์šฑ ๊ฐ€์†ํ™”ํ•˜๊ธฐ ์œ„ํ•ด AWQ ์–‘์žํ™”์™€ [FlashAttention-2](../perf_infer_gpu_one#flashattention-2) ๋ฅผ ๊ฒฐํ•ฉ ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("TheBloke/zephyr-7B-alpha-AWQ", attn_implementation="flash_attention_2", device_map="cuda:0") ``` ## ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ [[fused-modules]] ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์€ ์ •ํ™•๋„์™€ ์„ฑ๋Šฅ์„ ๊ฐœ์„ ํ•ฉ๋‹ˆ๋‹ค. 
ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์€ [Llama](https://huggingface.co/meta-llama) ์•„ํ‚คํ…์ฒ˜์™€ [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) ์•„ํ‚คํ…์ฒ˜์˜ AWQ๋ชจ๋“ˆ์— ๊ธฐ๋ณธ์ ์œผ๋กœ ์ง€์›๋ฉ๋‹ˆ๋‹ค. ๊ทธ๋Ÿฌ๋‚˜ ์ง€์›๋˜์ง€ ์•Š๋Š” ์•„ํ‚คํ…์ฒ˜์— ๋Œ€ํ•ด์„œ๋„ AWQ ๋ชจ๋“ˆ์„ ํ“จ์ฆˆํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. <Tip warning={true}> ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์€ FlashAttention-2์™€ ๊ฐ™์€ ๋‹ค๋ฅธ ์ตœ์ ํ™” ๊ธฐ์ˆ ๊ณผ ๊ฒฐํ•ฉํ•  ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค. </Tip> <hfoptions id="fuse"> <hfoption id="supported architectures"> ์ง€์›๋˜๋Š” ์•„ํ‚คํ…์ฒ˜์—์„œ ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์„ ํ™œ์„ฑํ™”ํ•˜๋ ค๋ฉด, [`AwqConfig`] ๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ๋งค๊ฐœ๋ณ€์ˆ˜ `fuse_max_seq_len` ๊ณผ `do_fuse=True`๋ฅผ ์„ค์ •ํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. `fuse_max_seq_len` ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ์ „์ฒด ์‹œํ€€์Šค ๊ธธ์ด๋กœ, ์ปจํ…์ŠคํŠธ ๊ธธ์ด์™€ ์˜ˆ์ƒ ์ƒ์„ฑ ๊ธธ์ด๋ฅผ ํฌํ•จํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค. ์•ˆ์ „ํ•˜๊ฒŒ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด ๋” ํฐ ๊ฐ’์œผ๋กœ ์„ค์ •ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด, [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) ๋ชจ๋ธ์˜ AWQ ๋ชจ๋“ˆ์„ ํ“จ์ฆˆํ•ด๋ณด๊ฒ ์Šต๋‹ˆ๋‹ค. ```python import torch from transformers import AwqConfig, AutoModelForCausalLM model_id = "TheBloke/Mistral-7B-OpenOrca-AWQ" quantization_config = AwqConfig( bits=4, fuse_max_seq_len=512, do_fuse=True, ) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config).to(0) ``` [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) ๋ชจ๋ธ์€ ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์ด ์žˆ๋Š” ๊ฒฝ์šฐ์™€ ์—†๋Š” ๊ฒฝ์šฐ ๋ชจ๋‘ `batch_size=1` ๋กœ ์„ฑ๋Šฅ ํ‰๊ฐ€๋˜์—ˆ์Šต๋‹ˆ๋‹ค. 
<figcaption class="text-center text-gray-500 text-lg">ํ“จ์ฆˆ๋˜์ง€ ์•Š์€ ๋ชจ๋“ˆ</figcaption> | ๋ฐฐ์น˜ ํฌ๊ธฐ | ํ”„๋ฆฌํ•„ ๊ธธ์ด | ๋””์ฝ”๋“œ ๊ธธ์ด | ํ”„๋ฆฌํ•„ ํ† ํฐ/์ดˆ | ๋””์ฝ”๋“œ ํ† ํฐ/์ดˆ | ๋ฉ”๋ชจ๋ฆฌ (VRAM) | |-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------| | 1 | 32 | 32 | 60.0984 | 38.4537 | 4.50 GB (5.68%) | | 1 | 64 | 64 | 1333.67 | 31.6604 | 4.50 GB (5.68%) | | 1 | 128 | 128 | 2434.06 | 31.6272 | 4.50 GB (5.68%) | | 1 | 256 | 256 | 3072.26 | 38.1731 | 4.50 GB (5.68%) | | 1 | 512 | 512 | 3184.74 | 31.6819 | 4.59 GB (5.80%) | | 1 | 1024 | 1024 | 3148.18 | 36.8031 | 4.81 GB (6.07%) | | 1 | 2048 | 2048 | 2927.33 | 35.2676 | 5.73 GB (7.23%) | <figcaption class="text-center text-gray-500 text-lg">ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ</figcaption> | ๋ฐฐ์น˜ ํฌ๊ธฐ | ํ”„๋ฆฌํ•„ ๊ธธ์ด | ๋””์ฝ”๋“œ ๊ธธ์ด | ํ”„๋ฆฌํ•„ ํ† ํฐ/์ดˆ | ๋””์ฝ”๋“œ ํ† ํฐ/์ดˆ | ๋ฉ”๋ชจ๋ฆฌ (VRAM) | |-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------| | 1 | 32 | 32 | 81.4899 | 80.2569 | 4.00 GB (5.05%) | | 1 | 64 | 64 | 1756.1 | 106.26 | 4.00 GB (5.05%) | | 1 | 128 | 128 | 2479.32 | 105.631 | 4.00 GB (5.06%) | | 1 | 256 | 256 | 1813.6 | 85.7485 | 4.01 GB (5.06%) | | 1 | 512 | 512 | 2848.9 | 97.701 | 4.11 GB (5.19%) | | 1 | 1024 | 1024 | 3044.35 | 87.7323 | 4.41 GB (5.57%) | | 1 | 2048 | 2048 | 2715.11 | 89.4709 | 5.57 GB (7.04%) | ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ ๋ฐ ํ“จ์ฆˆ๋˜์ง€ ์•Š์€ ๋ชจ๋“ˆ์˜ ์†๋„์™€ ์ฒ˜๋ฆฌ๋Ÿ‰์€ [optimum-benchmark](https://github.com/huggingface/optimum-benchmark)๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํ…Œ์ŠคํŠธ ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. 
<div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_forward_memory_plot.png" alt="generate throughput per batch size" /> <figcaption class="mt-2 text-center text-sm text-gray-500">ํฌ์›Œ๋“œ ํ”ผํฌ ๋ฉ”๋ชจ๋ฆฌ (forward peak memory)/๋ฐฐ์น˜ ํฌ๊ธฐ</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_generate_throughput_plot.png" alt="forward latency per batch size" /> <figcaption class="mt-2 text-center text-sm text-gray-500"> ์ƒ์„ฑ ์ฒ˜๋ฆฌ๋Ÿ‰/๋ฐฐ์น˜ํฌ๊ธฐ</figcaption> </div> </div> </hfoption> <hfoption id="unsupported architectures"> ํ“จ์ฆˆ๋œ ๋ชจ๋“ˆ์„ ์ง€์›ํ•˜์ง€ ์•Š๋Š” ์•„ํ‚คํ…์ฒ˜์˜ ๊ฒฝ์šฐ, `modules_to_fuse` ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ ์‚ฌ์šฉํ•ด ์ง์ ‘ ํ“จ์ฆˆ ๋งคํ•‘์„ ๋งŒ๋“ค์–ด ์–ด๋–ค ๋ชจ๋“ˆ์„ ํ“จ์ฆˆํ• ์ง€ ์ •์˜ํ•ด์•ผํ•ฉ๋‹ˆ๋‹ค. ์˜ˆ๋กœ, [TheBloke/Yi-34B-AWQ](https://huggingface.co/TheBloke/Yi-34B-AWQ) ๋ชจ๋ธ์˜ AWQ ๋ชจ๋“ˆ์„ ํ“จ์ฆˆํ•˜๋Š” ๋ฐฉ๋ฒ•์ž…๋‹ˆ๋‹ค. ```python import torch from transformers import AwqConfig, AutoModelForCausalLM model_id = "TheBloke/Yi-34B-AWQ" quantization_config = AwqConfig( bits=4, fuse_max_seq_len=512, modules_to_fuse={ "attention": ["q_proj", "k_proj", "v_proj", "o_proj"], "layernorm": ["ln1", "ln2", "norm"], "mlp": ["gate_proj", "up_proj", "down_proj"], "use_alibi": False, "num_attention_heads": 56, "num_key_value_heads": 8, "hidden_size": 7168 } ) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config).to(0) ``` `modules_to_fuse` ๋งค๊ฐœ๋ณ€์ˆ˜๋Š” ๋‹ค์Œ์„ ํฌํ•จํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค: - `"attention"`: ์–ดํ…์…˜ ๋ ˆ์ด์–ด๋Š” ๋‹ค์Œ ์ˆœ์„œ๋กœ ํ“จ์ฆˆํ•˜์„ธ์š” : ์ฟผ๋ฆฌ (query), ํ‚ค (key), ๊ฐ’ (value) , ์ถœ๋ ฅ ํ”„๋กœ์ ์…˜ ๊ณ„์ธต (output projection layer). ํ•ด๋‹น ๋ ˆ์ด์–ด๋ฅผ ํ“จ์ฆˆํ•˜์ง€ ์•Š์œผ๋ ค๋ฉด ๋นˆ ๋ฆฌ์ŠคํŠธ๋ฅผ ์ „๋‹ฌํ•˜์„ธ์š”. 
- `"layernorm"`: ์‚ฌ์šฉ์ž ์ •์˜ ํ“จ์ฆˆ ๋ ˆ์ด์–ด ์ •๊ทœํ™”๋กœ ๊ต์ฒดํ•  ๋ ˆ์ด์–ด ์ •๊ทœํ™” ๋ ˆ์ด์–ด๋ช…. ํ•ด๋‹น ๋ ˆ์ด์–ด๋ฅผ ํ“จ์ฆˆํ•˜์ง€ ์•Š์œผ๋ ค๋ฉด ๋นˆ ๋ฆฌ์ŠคํŠธ๋ฅผ ์ „๋‹ฌํ•˜์„ธ์š”. - `"mlp"`: ๋‹จ์ผ MLP ๋ ˆ์ด์–ด๋กœ ํ“จ์ฆˆํ•  MLP ๋ ˆ์ด์–ด ์ˆœ์„œ : (๊ฒŒ์ดํŠธ (gate) (๋ด์Šค(dense), ๋ ˆ์ด์–ด(layer), ํฌ์ŠคํŠธ ์–ดํ…์…˜(post-attention)) / ์œ„ / ์•„๋ž˜ ๋ ˆ์ด์–ด). - `"use_alibi"`: ๋ชจ๋ธ์ด ALiBi positional embedding์„ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. - `"num_attention_heads"`: ์–ดํ…์…˜ ํ—ค๋“œ (attention heads)์˜ ์ˆ˜๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. - `"num_key_value_heads"`: ๊ทธ๋ฃนํ™” ์ฟผ๋ฆฌ ์–ดํ…์…˜ (GQA)์„ ๊ตฌํ˜„ํ•˜๋Š”๋ฐ ์‚ฌ์šฉ๋˜๋Š” ํ‚ค ๊ฐ’ ํ—ค๋“œ์˜ ์ˆ˜๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. `num_key_value_heads=num_attention_heads`๋กœ ์„ค์ •ํ•  ๊ฒฝ์šฐ, ๋ชจ๋ธ์€ ๋‹ค์ค‘ ํ—ค๋“œ ์–ดํ…์…˜ (MHA)๊ฐ€ ์‚ฌ์šฉ๋˜๋ฉฐ, `num_key_value_heads=1` ๋Š” ๋‹ค์ค‘ ์ฟผ๋ฆฌ ์–ดํ…์…˜ (MQA)๊ฐ€, ๋‚˜๋จธ์ง€๋Š” GQA๊ฐ€ ์‚ฌ์šฉ๋ฉ๋‹ˆ๋‹ค. - `"hidden_size"`: ์ˆจ๊ฒจ์ง„ ํ‘œํ˜„(hidden representations)์˜ ์ฐจ์›์„ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค. </hfoption> </hfoptions> ## ExLlama-v2 ์„œํฌํŠธ [[exllama-v2-support]] ์ตœ์‹  ๋ฒ„์ „ `autoawq`๋Š” ๋น ๋ฅธ ํ”„๋ฆฌํ•„๊ณผ ๋””์ฝ”๋”ฉ์„ ์œ„ํ•ด ExLlama-v2 ์ปค๋„์„ ์ง€์›ํ•ฉ๋‹ˆ๋‹ค. ์‹œ์ž‘ํ•˜๊ธฐ ์œ„ํ•ด ๋จผ์ € ์ตœ์‹  ๋ฒ„์ „ `autoawq` ๋ฅผ ์„ค์น˜ํ•˜์„ธ์š” : ```bash pip install git+https://github.com/casper-hansen/AutoAWQ.git ``` ๋งค๊ฐœ๋ณ€์ˆ˜๋ฅผ `version="exllama"`๋กœ ์„ค์ •ํ•ด `AwqConfig()`๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ๋ชจ๋ธ์— ๋„˜๊ฒจ์ฃผ์„ธ์š”. 
```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, AwqConfig quantization_config = AwqConfig(version="exllama") model = AutoModelForCausalLM.from_pretrained( "TheBloke/Mistral-7B-Instruct-v0.1-AWQ", quantization_config=quantization_config, device_map="auto", ) input_ids = torch.randint(0, 100, (1, 128), dtype=torch.long, device="cuda") output = model(input_ids) print(output.logits) tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-AWQ") input_ids = tokenizer.encode("How to make a cake", return_tensors="pt").to(model.device) output = model.generate(input_ids, do_sample=True, max_length=50, pad_token_id=50256) print(tokenizer.decode(output[0], skip_special_tokens=True)) ``` <Tip warning={true}> ์ด ๊ธฐ๋Šฅ์€ AMD GPUs์—์„œ ์ง€์›๋ฉ๋‹ˆ๋‹ค. </Tip>
transformers/docs/source/ko/quantization/awq.md/0
{ "file_path": "transformers/docs/source/ko/quantization/awq.md", "repo_id": "transformers", "token_count": 7293 }
437